diff --git a/GNUmakefile b/GNUmakefile index 0052f3af639a..f2384d6fbdbc 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -4,41 +4,16 @@ default: build # mm setup -ifeq ($(ENGINE),tpgtools) - # we specify the product to one that doesn't - # exist so exclusively build base tpgtools implementation - mmv1_compile=-p does-not-exist -else ifneq ($(PRODUCT),) +ifneq ($(PRODUCT),) mmv1_compile=--product $(PRODUCT) endif -# tpgtools setup -ifeq ($(ENGINE),mmv1) - # we specify the product to one that doesn't - # exist so exclusively build base mmv1 implementation - tpgtools_compile = --service does-not-exist -else ifneq ($(PRODUCT),) - tpgtools_compile = --service $(PRODUCT) -else - tpgtools_compile = -endif - ifneq ($(RESOURCE),) mmv1_compile += --resource $(RESOURCE) - tpgtools_compile += --resource $(RESOURCE) endif ifneq ($(OVERRIDES),) mmv1_compile += --overrides $(OVERRIDES) - tpgtools_compile += --overrides $(OVERRIDES)/tpgtools/overrides --path $(OVERRIDES)/tpgtools/api - serialize_compile = --overrides $(OVERRIDES)/tpgtools/overrides --path $(OVERRIDES)/tpgtools/api -else - tpgtools_compile += --path "api" --overrides "overrides" - serialize_compile = --path "api" --overrides "overrides" -endif - -ifneq ($(VERBOSE),) - tpgtools_compile += --logtostderr=1 --stderrthreshold=2 endif UNAME := $(shell uname) @@ -50,10 +25,6 @@ else SED_I := -i '' -E endif -ifeq ($(FORCE_DCL),) - FORCE_DCL=latest -endif - SHOULD_SKIP_CLEAN := false # Default: do not skip ifneq ($(SKIP_CLEAN),) ifneq ($(SKIP_CLEAN),false) @@ -61,7 +32,7 @@ ifneq ($(SKIP_CLEAN),) endif endif -terraform build provider: validate_environment clean-provider mmv1 tpgtools +terraform build provider: validate_environment clean-provider mmv1 @echo "Provider generation process finished for $(VERSION) in $(OUTPUT_PATH)" @@ -75,12 +46,6 @@ mmv1: go run . 
--output $(OUTPUT_PATH) --version $(VERSION) $(mmv1_compile); \ fi -tpgtools: serialize - @echo "Executing tpgtools build for $(OUTPUT_PATH)"; - @cd tpgtools;\ - go run . --output $(OUTPUT_PATH) --version $(VERSION) $(tpgtools_compile); \ - rm serialization.go - clean-provider: check_safe_build @if [ -n "$(PRODUCT)" ]; then \ printf "\n\e[1;33mWARNING:\e[0m Skipping clean-provider step because PRODUCT ('$(PRODUCT)') is set.\n"; \ @@ -136,24 +101,6 @@ test: cd mmv1; \ go test ./... -serialize: - cd tpgtools;\ - cp -f serialization.go.base serialization.go &&\ - go run . $(serialize_compile) --mode "serialization" > temp.serial &&\ - mv -f temp.serial serialization.go - -upgrade-dcl: - make serialize - cd tpgtools && \ - go mod edit -dropreplace=github.com/GoogleCloudPlatform/declarative-resource-client-library &&\ - go mod edit -require=github.com/GoogleCloudPlatform/declarative-resource-client-library@$(FORCE_DCL) &&\ - go mod tidy;\ - MOD_LINE=$$(grep declarative-resource-client-library go.mod);\ - SUM_LINE=$$(grep declarative-resource-client-library go.sum);\ - cd ../mmv1/third_party/terraform && \ - sed ${SED_I} "s!.*declarative-resource-client-library.*!$$MOD_LINE!" go.mod; echo "$$SUM_LINE" >> go.sum - - validate_environment: check_parameters check_safe_build check_parameters: @@ -175,4 +122,4 @@ check_safe_build: doctor: ./scripts/doctor -.PHONY: mmv1 tpgtools test clean-provider validate_environment serialize doctor +.PHONY: mmv1 test clean-provider validate_environment doctor diff --git a/docs/content/_index.md b/docs/content/_index.md index f85822683a53..54afbdf4bc12 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -35,11 +35,10 @@ detection. ### Resource types -There are three types of resources supported by Magic Modules: +There are two types of resources supported by Magic Modules: + MMv1 + Handwritten -+ DCL/tpgtools The following sections describe these tools in detail. 
@@ -96,17 +95,6 @@ In the providers, handwritten resources and datasources are stored in `PROVIDER/ is `google` or `google-beta`, `SERVICE` is the service name, and `FILENAME` is the name of the handwritten file in magic-modules. Handwritten files do not have an `AUTO GENERATED CODE` header. -#### DCL aka tpgtools (maintenance mode) - -DCL / tpgtools is similar to MMv1; however, it is in maintenance mode, which means that new resources using the DCL are not being added. - -DCL-based files start with the following header: - -``` -***     AUTO GENERATED CODE    ***    Type: DCL     *** -``` - - ## Other Resources + [Extending Terraform](https://www.terraform.io/plugin) diff --git a/docs/content/code-review/release-notes.md b/docs/content/code-review/release-notes.md index ef1380a84ffe..122406b22aa0 100644 --- a/docs/content/code-review/release-notes.md +++ b/docs/content/code-review/release-notes.md @@ -98,10 +98,10 @@ For each release note block, choose an appropriate type from the following list: Do | Don't -- | ----- Use past tense to describe the end state after the change is released. Start with a verb. For example, "added...", "fixed...", or "resolved...". You can use future tense to describe future changes, such as saying that a deprecated field will be removed in a future version. | Don't use present or future tense to describe changes that are included in the pull request. -Write user-focused release notes. For example, reference specific impacted terraform resource and field names, and discuss changes in behavior users will experience. | Avoid API field/resource/feature names. Avoid implementation details. Avoid language that requires understanding of provider internals. However, in case of substantial refactorings like API version changes or engine changes (tpgtools/DCL -> MMv1, handwritten <> MMv1) **do** cover the change so users can quickly identify the release if they are affected by the change. +Write user-focused release notes. 
For example, reference specific impacted terraform resource and field names, and discuss changes in behavior users will experience. | Avoid API field/resource/feature names. Avoid implementation details. Avoid language that requires understanding of provider internals. However, in case of substantial refactorings like API version changes or engine changes (handwritten <> MMv1) **do** cover the change so users can quickly identify the release if they are affected by the change. Surround resource or field names with backticks. | Don't use resource or field names without punctuation or with other punctuation like quotation marks. Use impersonal third person. | Don't use "I", "you", etc. -If the pull request impacts a specific product, begin your release note with that product name followed by a colon. Use lower case for the first letter after the colon. For example, `cloudrun: added...` For MMv1 resources, use the folder name that contains the yaml files as the product name; for handwritten or tpgtools resources, use the API subdomain; for broad cross-product changes, use `provider`. | Don't begin your release note with the full resource name. Don't add backticks around the product name. Don't capitalize the first letter after the colon. +If the pull request impacts a specific product, begin your release note with that product name followed by a colon. Use lower case for the first letter after the colon. For example, `cloudrun: added...` For MMv1 resources, use the folder name that contains the yaml files as the product name; for handwritten resources, use the API subdomain; for broad cross-product changes, use `provider`. | Don't begin your release note with the full resource name. Don't add backticks around the product name. Don't capitalize the first letter after the colon. 
### Examples diff --git a/docs/content/reference/make-commands.md b/docs/content/reference/make-commands.md index 169f1329faa5..f9d9991015bb 100644 --- a/docs/content/reference/make-commands.md +++ b/docs/content/reference/make-commands.md @@ -34,10 +34,9 @@ make provider VERSION=ga OUTPUT_PATH="$GOPATH/src/github.com/hashicorp/terraform - `OUTPUT_PATH`: Required. The location you are generating provider code into. - `VERSION`: Required. The version of the provider you are building into. Valid values are `ga` and `beta`. -- `PRODUCT`: Limits generations to the specified folder within `mmv1/products` or `tpgtools/api`. Handwritten files from `mmv1/third_party/terraform` are always generated into the downstream regardless of this setting, so you can provide a non-existent product name to generate only handwritten code. Required if `RESOURCE` is specified. **Using `PRODUCT` skips the pre-generation cleanup step. This is considered advanced usage; recommend running a full, clean build (`make provider` without `PRODUCT`) beforehand if repositories may be out of sync.** +- `PRODUCT`: Limits generations to the specified folder within `mmv1/products`. Handwritten files from `mmv1/third_party/terraform` are always generated into the downstream regardless of this setting, so you can provide a non-existent product name to generate only handwritten code. Required if `RESOURCE` is specified. **Using `PRODUCT` skips the pre-generation cleanup step. This is considered advanced usage; recommend running a full, clean build (`make provider` without `PRODUCT`) beforehand if repositories may be out of sync.** - `SKIP_CLEAN`: If set to `true`, skips the default pre-generation cleanup of `OUTPUT_PATH` during a full provider build. Has no effect if `PRODUCT` is specified (as cleanup is already skipped). Example: `make provider VERSION=ga OUTPUT_PATH=... SKIP_CLEAN=true`. -- `RESOURCE`: Limits generation to the specified resource within a particular product. 
For `mmv1` resources, matches the resource's `name` field (set in its configuration file).For `tpgtools` resources, matches the terraform resource name. -- `ENGINE`: Modifies `make provider` to only generate code using the specified engine. Valid values are `mmv1` or `tpgtools`. (Providing `tpgtools` will still generate any prerequisite mmv1 files required for tpgtools.) +- `RESOURCE`: Limits generation to the specified resource within a particular product. For `mmv1` resources, matches the resource's `name` field (set in its configuration file). #### Cleaning up old files diff --git a/docs/content/reference/metadata.md b/docs/content/reference/metadata.md index d043eea42132..9a3c031cc818 100644 --- a/docs/content/reference/metadata.md +++ b/docs/content/reference/metadata.md @@ -9,7 +9,7 @@ This page documents the properties available in meta.yaml files. These files mak Meta.yaml files are auto-generated for MMv1 generated resources. -DCL and Handwritten resources will have handwritten meta.yaml files in the appropriate service directory in [mmv1/third_party/terraform/services/](https://github.com/GoogleCloudPlatform/magic-modules/tree/main/mmv1/third_party/terraform/services). The file name will be `resource_PRODUCT_RESOURCE_meta.yaml(.tmpl)`. For example, `resource_compute_instance_meta.yaml.tmpl` Handwritten meta.yaml files with a `.tmpl` extension can use version guards (`{{- if ne $.TargetVersionName "ga" }}...{{- else}}...{{- end}}`) to exclude beta fields from the `google` provider. +Handwritten resources will have handwritten meta.yaml files in the appropriate service directory in [mmv1/third_party/terraform/services/](https://github.com/GoogleCloudPlatform/magic-modules/tree/main/mmv1/third_party/terraform/services). The file name will be `resource_PRODUCT_RESOURCE_meta.yaml(.tmpl)`. 
For example, `resource_compute_instance_meta.yaml.tmpl`. Handwritten meta.yaml files with a `.tmpl` extension can use version guards (`{{- if ne $.TargetVersionName "ga" }}...{{- else}}...{{- end}}`) to exclude beta fields from the `google` provider. All resources and fields must be present in meta.yaml files for the provider(s) they're available in. @@ -21,7 +21,7 @@ The name of the Terraform resource. For example, "google_cloudfunctions2_functio ### `generation_type` -The generation method used to create the Terraform resource. For example, "mmv1", "dcl", "handwritten". +The generation method used to create the Terraform resource. For example, "mmv1" or "handwritten". ### `api_service_name` diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go.mod index 9ff4de001473..ccac4a514523 100644 --- a/mmv1/third_party/terraform/go.mod +++ b/mmv1/third_party/terraform/go.mod @@ -7,7 +7,6 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.8 cloud.google.com/go/bigquery v1.73.1 cloud.google.com/go/bigtable v1.42.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.84.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/dnaeon/go-vcr v1.0.1 diff --git a/mmv1/third_party/terraform/provider/provider_dcl_resources.go b/mmv1/third_party/terraform/provider/provider_dcl_resources.go new file mode 100644 index 000000000000..ab4567d5caae --- /dev/null +++ b/mmv1/third_party/terraform/provider/provider_dcl_resources.go @@ -0,0 +1,37 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/apikeys" + "github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads" + "github.com/hashicorp/terraform-provider-google/google/services/cloudbuild" + "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + 
"github.com/hashicorp/terraform-provider-google/google/services/containeraws" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + "github.com/hashicorp/terraform-provider-google/google/services/dataproc" + "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + "github.com/hashicorp/terraform-provider-google/google/services/gkehub" + "github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise" +) + +var dclResources = map[string]*schema.Resource{ + "google_apikeys_key": apikeys.ResourceApikeysKey(), + "google_assured_workloads_workload": assuredworkloads.ResourceAssuredWorkloadsWorkload(), + "google_cloudbuild_worker_pool": cloudbuild.ResourceCloudbuildWorkerPool(), + "google_clouddeploy_delivery_pipeline": clouddeploy.ResourceClouddeployDeliveryPipeline(), + "google_clouddeploy_target": clouddeploy.ResourceClouddeployTarget(), + "google_container_aws_cluster": containeraws.ResourceContainerAwsCluster(), + "google_container_aws_node_pool": containeraws.ResourceContainerAwsNodePool(), + "google_container_azure_client": containerazure.ResourceContainerAzureClient(), + "google_container_azure_cluster": containerazure.ResourceContainerAzureCluster(), + "google_container_azure_node_pool": containerazure.ResourceContainerAzureNodePool(), + "google_dataplex_asset": dataplex.ResourceDataplexAsset(), + "google_dataplex_lake": dataplex.ResourceDataplexLake(), + "google_dataplex_zone": dataplex.ResourceDataplexZone(), + "google_dataproc_workflow_template": dataproc.ResourceDataprocWorkflowTemplate(), + "google_firebaserules_release": firebaserules.ResourceFirebaserulesRelease(), + "google_firebaserules_ruleset": firebaserules.ResourceFirebaserulesRuleset(), + "google_gke_hub_feature_membership": gkehub.ResourceGkeHubFeatureMembership(), + "google_recaptcha_enterprise_key": 
recaptchaenterprise.ResourceRecaptchaEnterpriseKey(), +} diff --git a/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go b/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go new file mode 100644 index 000000000000..7bfd2e7124b8 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go @@ -0,0 +1,101 @@ +package apikeys + +import ( + "bytes" + "context" + "encoding/json" + "io" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +func keyStringGetURL(userBasePath string, r *Key) (string, error) { + nr := r.urlNormalized() + params := map[string]any{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/global/keys/{{name}}/keyString", "https://apikeys.googleapis.com/v2/", userBasePath, params), nil +} + +func (c *Client) getKeyStringRaw(ctx context.Context, r *Key) ([]byte, error) { + + u, err := keyStringGetURL(c.Config.BasePath, r) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := io.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) getKeyRaw(ctx context.Context, r *Key) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := io.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) GetKey(ctx context.Context, r *Key) (*Key, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(60*time.Second)) + defer cancel() + + b, err := 
c.getKeyRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalKey(b, c, r) + if err != nil { + return nil, err + } + // Get the value of KeyString through a separate api method. + b, err = c.getKeyStringRaw(ctx, r) + if err != nil { + return nil, err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + result.KeyString = dcl.FlattenString(m["keyString"]) + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.Infof("Retrieved raw result state: %v", result) + c.Config.Logger.Infof("Canonicalizing with specified state: %v", r) + result, err = canonicalizeKeyNewState(c, result, r) + if err != nil { + return nil, err + } + c.Config.Logger.Infof("Created result state: %v", result) + + return result, nil +} diff --git a/mmv1/third_party/terraform/services/apikeys/client.go b/mmv1/third_party/terraform/services/apikeys/client.go new file mode 100644 index 000000000000..737e48c574e1 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/client.go @@ -0,0 +1,18 @@ +package apikeys + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/apikeys/key.go.tmpl b/mmv1/third_party/terraform/services/apikeys/key.go.tmpl new file mode 100644 index 000000000000..3c57eb375de4 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/key.go.tmpl @@ -0,0 +1,662 @@ +package apikeys + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type Key struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + KeyString *string `json:"keyString"` + Uid *string `json:"uid"` + ServiceAccountEmail *string `json:"serviceAccountEmail"` + Restrictions *KeyRestrictions `json:"restrictions"` + Project *string `json:"project"` +} + +func (r *Key) String() string { + return dcl.SprintResource(r) +} + +type KeyRestrictions struct { + empty bool `json:"-"` + BrowserKeyRestrictions *KeyRestrictionsBrowserKeyRestrictions `json:"browserKeyRestrictions"` + ServerKeyRestrictions *KeyRestrictionsServerKeyRestrictions `json:"serverKeyRestrictions"` + AndroidKeyRestrictions *KeyRestrictionsAndroidKeyRestrictions `json:"androidKeyRestrictions"` + IosKeyRestrictions *KeyRestrictionsIosKeyRestrictions `json:"iosKeyRestrictions"` + ApiTargets []KeyRestrictionsApiTargets `json:"apiTargets"` +} + +type jsonKeyRestrictions KeyRestrictions + +func (r *KeyRestrictions) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictions + } else { + + r.BrowserKeyRestrictions = res.BrowserKeyRestrictions + + r.ServerKeyRestrictions = res.ServerKeyRestrictions + + r.AndroidKeyRestrictions = res.AndroidKeyRestrictions + + r.IosKeyRestrictions = res.IosKeyRestrictions + + r.ApiTargets = res.ApiTargets + + } + return 
nil +} + +// This object is used to assert a desired state where this KeyRestrictions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyKeyRestrictions *KeyRestrictions = &KeyRestrictions{empty: true} + +func (r *KeyRestrictions) Empty() bool { + return r.empty +} + +func (r *KeyRestrictions) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyRestrictions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyRestrictionsBrowserKeyRestrictions struct { + empty bool `json:"-"` + AllowedReferrers []string `json:"allowedReferrers"` +} + +type jsonKeyRestrictionsBrowserKeyRestrictions KeyRestrictionsBrowserKeyRestrictions + +func (r *KeyRestrictionsBrowserKeyRestrictions) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictionsBrowserKeyRestrictions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictionsBrowserKeyRestrictions + } else { + + r.AllowedReferrers = res.AllowedReferrers + + } + return nil +} + +// This object is used to assert a desired state where this KeyRestrictionsBrowserKeyRestrictions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyRestrictionsBrowserKeyRestrictions *KeyRestrictionsBrowserKeyRestrictions = &KeyRestrictionsBrowserKeyRestrictions{empty: true} + +func (r *KeyRestrictionsBrowserKeyRestrictions) Empty() bool { + return r.empty +} + +func (r *KeyRestrictionsBrowserKeyRestrictions) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyRestrictionsBrowserKeyRestrictions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyRestrictionsServerKeyRestrictions struct { + empty bool `json:"-"` + AllowedIps []string `json:"allowedIps"` +} + +type jsonKeyRestrictionsServerKeyRestrictions KeyRestrictionsServerKeyRestrictions + +func (r *KeyRestrictionsServerKeyRestrictions) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictionsServerKeyRestrictions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictionsServerKeyRestrictions + } else { + + r.AllowedIps = res.AllowedIps + + } + return nil +} + +// This object is used to assert a desired state where this KeyRestrictionsServerKeyRestrictions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyRestrictionsServerKeyRestrictions *KeyRestrictionsServerKeyRestrictions = &KeyRestrictionsServerKeyRestrictions{empty: true} + +func (r *KeyRestrictionsServerKeyRestrictions) Empty() bool { + return r.empty +} + +func (r *KeyRestrictionsServerKeyRestrictions) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyRestrictionsServerKeyRestrictions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyRestrictionsAndroidKeyRestrictions struct { + empty bool `json:"-"` + AllowedApplications []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications `json:"allowedApplications"` +} + +type jsonKeyRestrictionsAndroidKeyRestrictions KeyRestrictionsAndroidKeyRestrictions + +func (r *KeyRestrictionsAndroidKeyRestrictions) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictionsAndroidKeyRestrictions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictionsAndroidKeyRestrictions + } else { + + r.AllowedApplications = res.AllowedApplications + + } + return nil +} + +// This object is used to assert a desired state where this KeyRestrictionsAndroidKeyRestrictions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyRestrictionsAndroidKeyRestrictions *KeyRestrictionsAndroidKeyRestrictions = &KeyRestrictionsAndroidKeyRestrictions{empty: true} + +func (r *KeyRestrictionsAndroidKeyRestrictions) Empty() bool { + return r.empty +} + +func (r *KeyRestrictionsAndroidKeyRestrictions) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyRestrictionsAndroidKeyRestrictions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyRestrictionsAndroidKeyRestrictionsAllowedApplications struct { + empty bool `json:"-"` + Sha1Fingerprint *string `json:"sha1Fingerprint"` + PackageName *string `json:"packageName"` +} + +type jsonKeyRestrictionsAndroidKeyRestrictionsAllowedApplications KeyRestrictionsAndroidKeyRestrictionsAllowedApplications + +func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictionsAndroidKeyRestrictionsAllowedApplications + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications + } else { + + r.Sha1Fingerprint = res.Sha1Fingerprint + + r.PackageName = res.PackageName + + } + return nil +} + +// This object is used to assert a desired state where this KeyRestrictionsAndroidKeyRestrictionsAllowedApplications is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications = &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{empty: true} + +func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) Empty() bool { + return r.empty +} + +func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyRestrictionsIosKeyRestrictions struct { + empty bool `json:"-"` + AllowedBundleIds []string `json:"allowedBundleIds"` +} + +type jsonKeyRestrictionsIosKeyRestrictions KeyRestrictionsIosKeyRestrictions + +func (r *KeyRestrictionsIosKeyRestrictions) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictionsIosKeyRestrictions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictionsIosKeyRestrictions + } else { + + r.AllowedBundleIds = res.AllowedBundleIds + + } + return nil +} + +// This object is used to assert a desired state where this KeyRestrictionsIosKeyRestrictions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyRestrictionsIosKeyRestrictions *KeyRestrictionsIosKeyRestrictions = &KeyRestrictionsIosKeyRestrictions{empty: true} + +func (r *KeyRestrictionsIosKeyRestrictions) Empty() bool { + return r.empty +} + +func (r *KeyRestrictionsIosKeyRestrictions) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyRestrictionsIosKeyRestrictions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyRestrictionsApiTargets struct { + empty bool `json:"-"` + Service *string `json:"service"` + Methods []string `json:"methods"` +} + +type jsonKeyRestrictionsApiTargets KeyRestrictionsApiTargets + +func (r *KeyRestrictionsApiTargets) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictionsApiTargets + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictionsApiTargets + } else { + + r.Service = res.Service + + r.Methods = res.Methods + + } + return nil +} + +// This object is used to assert a desired state where this KeyRestrictionsApiTargets is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyRestrictionsApiTargets *KeyRestrictionsApiTargets = &KeyRestrictionsApiTargets{empty: true} + +func (r *KeyRestrictionsApiTargets) Empty() bool { + return r.empty +} + +func (r *KeyRestrictionsApiTargets) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyRestrictionsApiTargets) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Key) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "apikeys", + Type: "Key", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "apikeys", +{{- end }} + } +} + +func (r *Key) ID() (string, error) { + if err := extractKeyFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "key_string": dcl.ValueOrEmptyString(nr.KeyString), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "service_account_email": dcl.ValueOrEmptyString(nr.ServiceAccountEmail), + "restrictions": dcl.ValueOrEmptyString(nr.Restrictions), + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const KeyMaxPage = -1 + +type KeyList struct { + Items []*Key + + nextToken string + + pageSize int32 + + resource *Key +} + +func (l *KeyList) HasNext() bool { + return l.nextToken != "" +} + +func (l *KeyList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := 
c.listKey(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListKey(ctx context.Context, project string) (*KeyList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListKeyWithMaxResults(ctx, project, KeyMaxPage) + +} + +func (c *Client) ListKeyWithMaxResults(ctx context.Context, project string, pageSize int32) (*KeyList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Key{ + Project: &project, + } + items, token, err := c.listKey(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &KeyList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) DeleteKey(ctx context.Context, r *Key) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Key resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Key...") + deleteOp := deleteKeyOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllKey deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllKey(ctx context.Context, project string, filter func(*Key) bool) error { + listObj, err := c.ListKey(ctx, project) + if err != nil { + return err + } + + err = c.deleteAllKey(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllKey(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyKey(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Key + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyKeyHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyKeyHelper(c *Client, ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyKey...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractKeyFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.keyDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToKeyDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []keyApiOperation + if create { + ops = append(ops, &createKeyOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyKeyDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyKeyDiff(c *Client, ctx context.Context, desired *Key, rawDesired *Key, ops []keyApiOperation, opts ...dcl.ApplyOption) (*Key, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetKey(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createKeyOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapKey(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeKeyNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeKeyNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeKeyDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractKeyFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractKeyFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffKey(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/apikeys/key_internal.go b/mmv1/third_party/terraform/services/apikeys/key_internal.go new file mode 100644 index 000000000000..559b07482fd4 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/key_internal.go @@ -0,0 +1,2909 @@ +package apikeys + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Key) validate() error { + + if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Restrictions) { + if err := r.Restrictions.validate(); err != nil { + return err + } + } + return nil +} +func (r *KeyRestrictions) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"BrowserKeyRestrictions", "ServerKeyRestrictions", "AndroidKeyRestrictions", "IosKeyRestrictions"}, r.BrowserKeyRestrictions, r.ServerKeyRestrictions, r.AndroidKeyRestrictions, r.IosKeyRestrictions); err != nil { + return err + } + if 
!dcl.IsEmptyValueIndirect(r.BrowserKeyRestrictions) { + if err := r.BrowserKeyRestrictions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ServerKeyRestrictions) { + if err := r.ServerKeyRestrictions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AndroidKeyRestrictions) { + if err := r.AndroidKeyRestrictions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.IosKeyRestrictions) { + if err := r.IosKeyRestrictions.validate(); err != nil { + return err + } + } + return nil +} +func (r *KeyRestrictionsBrowserKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedReferrers"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsServerKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedIps"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsAndroidKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedApplications"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) validate() error { + if err := dcl.Required(r, "sha1Fingerprint"); err != nil { + return err + } + if err := dcl.Required(r, "packageName"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsIosKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedBundleIds"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsApiTargets) validate() error { + if err := dcl.Required(r, "service"); err != nil { + return err + } + return nil +} +func (r *Key) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://apikeys.googleapis.com/v2/", params) +} + +func (r *Key) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return 
dcl.URL("projects/{{project}}/locations/global/keys/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Key) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{project}}/locations/global/keys", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/global/keys?keyId={{name}}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/global/keys/{{name}}", nr.basePath(), userBasePath, params), nil +} + +// keyApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type keyApiOperation interface { + do(context.Context, *Key, *Client) error +} + +// newUpdateKeyUpdateKeyRequest creates a request for an +// Key resource's UpdateKey update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateKeyUpdateKeyRequest(ctx context.Context, f *Key, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v, err := expandKeyRestrictions(c, f.Restrictions, res); err != nil { + return nil, fmt.Errorf("error expanding Restrictions into restrictions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["restrictions"] = v + } + return req, nil +} + +// marshalUpdateKeyUpdateKeyRequest converts the update into +// the final JSON request body. +func marshalUpdateKeyUpdateKeyRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateKeyUpdateKeyOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateKeyUpdateKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + _, err := c.GetKey(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateKey") + if err != nil { + return err + } + mask := dcl.TopLevelUpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateKeyUpdateKeyRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateKeyUpdateKeyRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listKeyRaw(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != KeyMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listKeyOperation struct { + Keys []map[string]interface{} `json:"keys"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listKey(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]*Key, string, error) { + b, err 
:= c.listKeyRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listKeyOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Key + for _, v := range m.Keys { + res, err := unmarshalMapKey(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllKey(ctx context.Context, f func(*Key) bool, resources []*Key) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteKey(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteKeyOperation struct{} + +func (op *deleteKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + r, err := c.GetKey(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Key not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetKey checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createKeyOperation struct { + response map[string]interface{} +} + +func (op *createKeyOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetKey(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) keyDiffsForRawDesired(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (initial, desired *Key, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Key + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Key); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Key, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetKey(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Key resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Key resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Key resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Key: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Key: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractKeyFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeKeyInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Key: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Key: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffKey(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeKeyInitialState(rawInitial, rawDesired *Key) (*Key, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeKeyDesiredState(rawDesired, rawInitial *Key, opts ...dcl.ApplyOption) (*Key, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Restrictions = canonicalizeKeyRestrictions(rawDesired.Restrictions, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Key{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.ServiceAccountEmail) || (dcl.IsEmptyValueIndirect(rawDesired.ServiceAccountEmail) && dcl.IsEmptyValueIndirect(rawInitial.ServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.ServiceAccountEmail = rawInitial.ServiceAccountEmail + } else { + canonicalDesired.ServiceAccountEmail = rawDesired.ServiceAccountEmail + } + canonicalDesired.Restrictions = canonicalizeKeyRestrictions(rawDesired.Restrictions, rawInitial.Restrictions, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + return canonicalDesired, nil +} + +func canonicalizeKeyNewState(c *Client, rawNew, rawDesired *Key) (*Key, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.KeyString) && dcl.IsEmptyValueIndirect(rawDesired.KeyString) { + rawNew.KeyString = rawDesired.KeyString + } else { + if dcl.StringCanonicalize(rawDesired.KeyString, rawNew.KeyString) { + rawNew.KeyString = rawDesired.KeyString + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ServiceAccountEmail) && dcl.IsEmptyValueIndirect(rawDesired.ServiceAccountEmail) { + rawNew.ServiceAccountEmail = rawDesired.ServiceAccountEmail + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Restrictions) && dcl.IsEmptyValueIndirect(rawDesired.Restrictions) { + rawNew.Restrictions = rawDesired.Restrictions + } else { + rawNew.Restrictions = canonicalizeNewKeyRestrictions(c, rawDesired.Restrictions, rawNew.Restrictions) + } + + rawNew.Project = rawDesired.Project + + return rawNew, nil +} + +func canonicalizeKeyRestrictions(des, initial *KeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.BrowserKeyRestrictions != nil || (initial != nil && initial.BrowserKeyRestrictions != nil) { + // Check if anything else is set. 
+ if dcl.AnySet(des.ServerKeyRestrictions, des.AndroidKeyRestrictions, des.IosKeyRestrictions) { + des.BrowserKeyRestrictions = nil + if initial != nil { + initial.BrowserKeyRestrictions = nil + } + } + } + + if des.ServerKeyRestrictions != nil || (initial != nil && initial.ServerKeyRestrictions != nil) { + // Check if anything else is set. + if dcl.AnySet(des.BrowserKeyRestrictions, des.AndroidKeyRestrictions, des.IosKeyRestrictions) { + des.ServerKeyRestrictions = nil + if initial != nil { + initial.ServerKeyRestrictions = nil + } + } + } + + if des.AndroidKeyRestrictions != nil || (initial != nil && initial.AndroidKeyRestrictions != nil) { + // Check if anything else is set. + if dcl.AnySet(des.BrowserKeyRestrictions, des.ServerKeyRestrictions, des.IosKeyRestrictions) { + des.AndroidKeyRestrictions = nil + if initial != nil { + initial.AndroidKeyRestrictions = nil + } + } + } + + if des.IosKeyRestrictions != nil || (initial != nil && initial.IosKeyRestrictions != nil) { + // Check if anything else is set. + if dcl.AnySet(des.BrowserKeyRestrictions, des.ServerKeyRestrictions, des.AndroidKeyRestrictions) { + des.IosKeyRestrictions = nil + if initial != nil { + initial.IosKeyRestrictions = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictions{} + + cDes.BrowserKeyRestrictions = canonicalizeKeyRestrictionsBrowserKeyRestrictions(des.BrowserKeyRestrictions, initial.BrowserKeyRestrictions, opts...) + cDes.ServerKeyRestrictions = canonicalizeKeyRestrictionsServerKeyRestrictions(des.ServerKeyRestrictions, initial.ServerKeyRestrictions, opts...) + cDes.AndroidKeyRestrictions = canonicalizeKeyRestrictionsAndroidKeyRestrictions(des.AndroidKeyRestrictions, initial.AndroidKeyRestrictions, opts...) + cDes.IosKeyRestrictions = canonicalizeKeyRestrictionsIosKeyRestrictions(des.IosKeyRestrictions, initial.IosKeyRestrictions, opts...) + cDes.ApiTargets = canonicalizeKeyRestrictionsApiTargetsSlice(des.ApiTargets, initial.ApiTargets, opts...) 
+ + return cDes +} + +func canonicalizeKeyRestrictionsSlice(des, initial []KeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictions(c *Client, des, nw *KeyRestrictions) *KeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.BrowserKeyRestrictions = canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c, des.BrowserKeyRestrictions, nw.BrowserKeyRestrictions) + nw.ServerKeyRestrictions = canonicalizeNewKeyRestrictionsServerKeyRestrictions(c, des.ServerKeyRestrictions, nw.ServerKeyRestrictions) + nw.AndroidKeyRestrictions = canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c, des.AndroidKeyRestrictions, nw.AndroidKeyRestrictions) + nw.IosKeyRestrictions = canonicalizeNewKeyRestrictionsIosKeyRestrictions(c, des.IosKeyRestrictions, nw.IosKeyRestrictions) + nw.ApiTargets = canonicalizeNewKeyRestrictionsApiTargetsSlice(c, des.ApiTargets, nw.ApiTargets) + + return nw +} + +func canonicalizeNewKeyRestrictionsSet(c *Client, des, nw []KeyRestrictions) []KeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []KeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictions) []KeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsBrowserKeyRestrictions(des, initial *KeyRestrictionsBrowserKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsBrowserKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsBrowserKeyRestrictions{} + + if dcl.StringArrayCanonicalize(des.AllowedReferrers, initial.AllowedReferrers) { + cDes.AllowedReferrers = initial.AllowedReferrers + } else { + cDes.AllowedReferrers = des.AllowedReferrers + } + + return cDes +} + +func canonicalizeKeyRestrictionsBrowserKeyRestrictionsSlice(des, initial []KeyRestrictionsBrowserKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsBrowserKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsBrowserKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsBrowserKeyRestrictions(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsBrowserKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsBrowserKeyRestrictions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c *Client, des, nw *KeyRestrictionsBrowserKeyRestrictions) *KeyRestrictionsBrowserKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsBrowserKeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.AllowedReferrers, nw.AllowedReferrers) { + nw.AllowedReferrers = des.AllowedReferrers + } + + return nw +} + +func canonicalizeNewKeyRestrictionsBrowserKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsBrowserKeyRestrictions) []KeyRestrictionsBrowserKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsBrowserKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsBrowserKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewKeyRestrictionsBrowserKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsBrowserKeyRestrictions) []KeyRestrictionsBrowserKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsBrowserKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsServerKeyRestrictions(des, initial *KeyRestrictionsServerKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsServerKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsServerKeyRestrictions{} + + if dcl.StringArrayCanonicalize(des.AllowedIps, initial.AllowedIps) { + cDes.AllowedIps = initial.AllowedIps + } else { + cDes.AllowedIps = des.AllowedIps + } + + return cDes +} + +func canonicalizeKeyRestrictionsServerKeyRestrictionsSlice(des, initial []KeyRestrictionsServerKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsServerKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsServerKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsServerKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsServerKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsServerKeyRestrictions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsServerKeyRestrictions(c *Client, des, nw *KeyRestrictionsServerKeyRestrictions) *KeyRestrictionsServerKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsServerKeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.AllowedIps, nw.AllowedIps) { + nw.AllowedIps = des.AllowedIps + } + + return nw +} + +func canonicalizeNewKeyRestrictionsServerKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsServerKeyRestrictions) []KeyRestrictionsServerKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsServerKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsServerKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsServerKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsServerKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsServerKeyRestrictions) []KeyRestrictionsServerKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsServerKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsServerKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictions(des, initial *KeyRestrictionsAndroidKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsAndroidKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsAndroidKeyRestrictions{} + + cDes.AllowedApplications = canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(des.AllowedApplications, initial.AllowedApplications, opts...) + + return cDes +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictionsSlice(des, initial []KeyRestrictionsAndroidKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsAndroidKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsAndroidKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsAndroidKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c *Client, des, nw *KeyRestrictionsAndroidKeyRestrictions) *KeyRestrictionsAndroidKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsAndroidKeyRestrictions while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.AllowedApplications = canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c, des.AllowedApplications, nw.AllowedApplications) + + return nw +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictions) []KeyRestrictionsAndroidKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsAndroidKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsAndroidKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictions) []KeyRestrictionsAndroidKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsAndroidKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(des, initial *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, opts ...dcl.ApplyOption) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + + if dcl.StringCanonicalize(des.Sha1Fingerprint, initial.Sha1Fingerprint) || dcl.IsZeroValue(des.Sha1Fingerprint) { + cDes.Sha1Fingerprint = initial.Sha1Fingerprint + } else { + cDes.Sha1Fingerprint = des.Sha1Fingerprint + } + if dcl.StringCanonicalize(des.PackageName, initial.PackageName) || dcl.IsZeroValue(des.PackageName) { + cDes.PackageName = initial.PackageName + } else { + cDes.PackageName = des.PackageName + } + + return cDes +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(des, initial []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, opts ...dcl.ApplyOption) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c *Client, des, nw *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsAndroidKeyRestrictionsAllowedApplications while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Sha1Fingerprint, nw.Sha1Fingerprint) { + nw.Sha1Fingerprint = des.Sha1Fingerprint + } + if dcl.StringCanonicalize(des.PackageName, nw.PackageName) { + nw.PackageName = des.PackageName + } + + return nw +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSet(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsIosKeyRestrictions(des, initial *KeyRestrictionsIosKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsIosKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsIosKeyRestrictions{} + + if dcl.StringArrayCanonicalize(des.AllowedBundleIds, initial.AllowedBundleIds) { + cDes.AllowedBundleIds = initial.AllowedBundleIds + } else { + cDes.AllowedBundleIds = des.AllowedBundleIds + } + + return cDes +} + +func canonicalizeKeyRestrictionsIosKeyRestrictionsSlice(des, initial []KeyRestrictionsIosKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsIosKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsIosKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsIosKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsIosKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsIosKeyRestrictions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsIosKeyRestrictions(c *Client, des, nw *KeyRestrictionsIosKeyRestrictions) *KeyRestrictionsIosKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsIosKeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.AllowedBundleIds, nw.AllowedBundleIds) { + nw.AllowedBundleIds = des.AllowedBundleIds + } + + return nw +} + +func canonicalizeNewKeyRestrictionsIosKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsIosKeyRestrictions) []KeyRestrictionsIosKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsIosKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsIosKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsIosKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsIosKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsIosKeyRestrictions) []KeyRestrictionsIosKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsIosKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsIosKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsApiTargets(des, initial *KeyRestrictionsApiTargets, opts ...dcl.ApplyOption) *KeyRestrictionsApiTargets { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsApiTargets{} + + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringArrayCanonicalize(des.Methods, initial.Methods) { + cDes.Methods = initial.Methods + } else { + cDes.Methods = des.Methods + } + + return cDes +} + +func canonicalizeKeyRestrictionsApiTargetsSlice(des, initial []KeyRestrictionsApiTargets, opts ...dcl.ApplyOption) []KeyRestrictionsApiTargets { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsApiTargets, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsApiTargets(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsApiTargets, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsApiTargets(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsApiTargets(c *Client, des, nw *KeyRestrictionsApiTargets) *KeyRestrictionsApiTargets { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsApiTargets while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringArrayCanonicalize(des.Methods, nw.Methods) { + nw.Methods = des.Methods + } + + return nw +} + +func canonicalizeNewKeyRestrictionsApiTargetsSet(c *Client, des, nw []KeyRestrictionsApiTargets) []KeyRestrictionsApiTargets { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsApiTargets + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsApiTargetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsApiTargets(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsApiTargetsSlice(c *Client, des, nw []KeyRestrictionsApiTargets) []KeyRestrictionsApiTargets { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsApiTargets + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsApiTargets(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. 
This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffKey(c *Client, desired, actual *Key, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.KeyString, actual.KeyString, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyString")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ServiceAccountEmail, actual.ServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccountEmail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Restrictions, actual.Restrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Restrictions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyRestrictions) + if !ok { + desiredNotPointer, ok := d.(KeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictions or *KeyRestrictions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyRestrictions) + if !ok { + actualNotPointer, ok := a.(KeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BrowserKeyRestrictions, actual.BrowserKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsBrowserKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsBrowserKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("BrowserKeyRestrictions")); 
len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServerKeyRestrictions, actual.ServerKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsServerKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsServerKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("ServerKeyRestrictions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AndroidKeyRestrictions, actual.AndroidKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsAndroidKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsAndroidKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AndroidKeyRestrictions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IosKeyRestrictions, actual.IosKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsIosKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsIosKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("IosKeyRestrictions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ApiTargets, actual.ApiTargets, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsApiTargetsNewStyle, EmptyObject: EmptyKeyRestrictionsApiTargets, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("ApiTargets")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareKeyRestrictionsBrowserKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyRestrictionsBrowserKeyRestrictions) + if !ok { + desiredNotPointer, ok := d.(KeyRestrictionsBrowserKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsBrowserKeyRestrictions or *KeyRestrictionsBrowserKeyRestrictions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyRestrictionsBrowserKeyRestrictions) + if !ok { + actualNotPointer, ok := a.(KeyRestrictionsBrowserKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsBrowserKeyRestrictions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AllowedReferrers, actual.AllowedReferrers, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedReferrers")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareKeyRestrictionsServerKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyRestrictionsServerKeyRestrictions) + if !ok { + desiredNotPointer, ok := d.(KeyRestrictionsServerKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsServerKeyRestrictions or *KeyRestrictionsServerKeyRestrictions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyRestrictionsServerKeyRestrictions) + if !ok { + actualNotPointer, ok := a.(KeyRestrictionsServerKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsServerKeyRestrictions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AllowedIps, actual.AllowedIps, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedIps")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareKeyRestrictionsAndroidKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyRestrictionsAndroidKeyRestrictions) + if !ok { + desiredNotPointer, ok := d.(KeyRestrictionsAndroidKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictions or *KeyRestrictionsAndroidKeyRestrictions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyRestrictionsAndroidKeyRestrictions) + if !ok { + actualNotPointer, ok := a.(KeyRestrictionsAndroidKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AllowedApplications, actual.AllowedApplications, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsNewStyle, EmptyObject: EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedApplications")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) + if !ok { + desiredNotPointer, ok := d.(KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictionsAllowedApplications or *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) + if !ok { + actualNotPointer, ok := a.(KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictionsAllowedApplications", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Sha1Fingerprint, actual.Sha1Fingerprint, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Sha1Fingerprint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PackageName, actual.PackageName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("PackageName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareKeyRestrictionsIosKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyRestrictionsIosKeyRestrictions) + if !ok { + desiredNotPointer, ok := d.(KeyRestrictionsIosKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsIosKeyRestrictions or *KeyRestrictionsIosKeyRestrictions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyRestrictionsIosKeyRestrictions) + if !ok { + actualNotPointer, ok := a.(KeyRestrictionsIosKeyRestrictions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsIosKeyRestrictions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AllowedBundleIds, actual.AllowedBundleIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedBundleIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareKeyRestrictionsApiTargetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyRestrictionsApiTargets) + if !ok { + desiredNotPointer, ok := d.(KeyRestrictionsApiTargets) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsApiTargets or *KeyRestrictionsApiTargets", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyRestrictionsApiTargets) + if !ok { + actualNotPointer, ok := a.(KeyRestrictionsApiTargets) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyRestrictionsApiTargets", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Methods, actual.Methods, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Methods")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Key) urlNormalized() *Key { + normalized := dcl.Copy(*r).(Key) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.KeyString = dcl.SelfLinkToName(r.KeyString) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.ServiceAccountEmail = dcl.SelfLinkToName(r.ServiceAccountEmail) + normalized.Project = dcl.SelfLinkToName(r.Project) + return &normalized +} + +func (r *Key) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateKey" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/global/keys/{{name}}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Key resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Key) marshal(c *Client) ([]byte, error) { + m, err := expandKey(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Key: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalKey decodes JSON responses into the Key resource schema. 
func unmarshalKey(b []byte, c *Client, res *Key) (*Key, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapKey(m, c, res)
}

// unmarshalMapKey converts an already-decoded JSON map into a Key, treating a
// flatten result of nil (empty/non-object input) as an error.
func unmarshalMapKey(m map[string]interface{}, c *Client, res *Key) (*Key, error) {

	flattened := flattenKey(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}

// expandKey expands Key into a JSON request object.
func expandKey(c *Client, f *Key) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	// Name and Project go through dcl.EmptyValue() rather than the struct
	// fields — presumably these are substituted into the URL instead of the
	// request body; confirm against DCL's EmptyValue semantics.
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["name"] = v
	}
	if v := f.DisplayName; dcl.ValueShouldBeSent(v) {
		m["displayName"] = v
	}
	if v := f.ServiceAccountEmail; dcl.ValueShouldBeSent(v) {
		m["serviceAccountEmail"] = v
	}
	if v, err := expandKeyRestrictions(c, f.Restrictions, res); err != nil {
		return nil, fmt.Errorf("error expanding Restrictions into restrictions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["restrictions"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}

	return m, nil
}

// flattenKey flattens Key from a JSON request object into the
// Key type.
+func flattenKey(c *Client, i interface{}, res *Key) *Key { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Key{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.KeyString = dcl.FlattenString(m["keyString"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.ServiceAccountEmail = dcl.FlattenString(m["serviceAccountEmail"]) + resultRes.Restrictions = flattenKeyRestrictions(c, m["restrictions"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + + return resultRes +} + +// expandKeyRestrictionsMap expands the contents of KeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsMap(c *Client, f map[string]KeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsSlice expands the contents of KeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsSlice(c *Client, f []KeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsMap flattens the contents of KeyRestrictions from a JSON +// response object. 
+func flattenKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictions{} + } + + items := make(map[string]KeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsSlice flattens the contents of KeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictions{} + } + + items := make([]KeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictions expands an instance of KeyRestrictions into a JSON +// request object. 
func expandKeyRestrictions(c *Client, f *KeyRestrictions, res *Key) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	// Each platform restriction is included only when it expands to a
	// non-empty value; errors abort the whole expansion.
	if v, err := expandKeyRestrictionsBrowserKeyRestrictions(c, f.BrowserKeyRestrictions, res); err != nil {
		return nil, fmt.Errorf("error expanding BrowserKeyRestrictions into browserKeyRestrictions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["browserKeyRestrictions"] = v
	}
	if v, err := expandKeyRestrictionsServerKeyRestrictions(c, f.ServerKeyRestrictions, res); err != nil {
		return nil, fmt.Errorf("error expanding ServerKeyRestrictions into serverKeyRestrictions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["serverKeyRestrictions"] = v
	}
	if v, err := expandKeyRestrictionsAndroidKeyRestrictions(c, f.AndroidKeyRestrictions, res); err != nil {
		return nil, fmt.Errorf("error expanding AndroidKeyRestrictions into androidKeyRestrictions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["androidKeyRestrictions"] = v
	}
	if v, err := expandKeyRestrictionsIosKeyRestrictions(c, f.IosKeyRestrictions, res); err != nil {
		return nil, fmt.Errorf("error expanding IosKeyRestrictions into iosKeyRestrictions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["iosKeyRestrictions"] = v
	}
	// ApiTargets is a slice, so it is gated on != nil rather than emptiness:
	// an expanded empty (non-nil) slice is still sent.
	if v, err := expandKeyRestrictionsApiTargetsSlice(c, f.ApiTargets, res); err != nil {
		return nil, fmt.Errorf("error expanding ApiTargets into apiTargets: %w", err)
	} else if v != nil {
		m["apiTargets"] = v
	}

	return m, nil
}

// flattenKeyRestrictions flattens an instance of KeyRestrictions from a JSON
// response object.
+func flattenKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictions + } + r.BrowserKeyRestrictions = flattenKeyRestrictionsBrowserKeyRestrictions(c, m["browserKeyRestrictions"], res) + r.ServerKeyRestrictions = flattenKeyRestrictionsServerKeyRestrictions(c, m["serverKeyRestrictions"], res) + r.AndroidKeyRestrictions = flattenKeyRestrictionsAndroidKeyRestrictions(c, m["androidKeyRestrictions"], res) + r.IosKeyRestrictions = flattenKeyRestrictionsIosKeyRestrictions(c, m["iosKeyRestrictions"], res) + r.ApiTargets = flattenKeyRestrictionsApiTargetsSlice(c, m["apiTargets"], res) + + return r +} + +// expandKeyRestrictionsBrowserKeyRestrictionsMap expands the contents of KeyRestrictionsBrowserKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsBrowserKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsBrowserKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsBrowserKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsBrowserKeyRestrictionsSlice expands the contents of KeyRestrictionsBrowserKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsBrowserKeyRestrictionsSlice(c *Client, f []KeyRestrictionsBrowserKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsBrowserKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsBrowserKeyRestrictionsMap flattens the contents of KeyRestrictionsBrowserKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsBrowserKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsBrowserKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsBrowserKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsBrowserKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsBrowserKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsBrowserKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsBrowserKeyRestrictionsSlice flattens the contents of KeyRestrictionsBrowserKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsBrowserKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsBrowserKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsBrowserKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsBrowserKeyRestrictions{} + } + + items := make([]KeyRestrictionsBrowserKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsBrowserKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsBrowserKeyRestrictions expands an instance of KeyRestrictionsBrowserKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsBrowserKeyRestrictions(c *Client, f *KeyRestrictionsBrowserKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowedReferrers; v != nil { + m["allowedReferrers"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsBrowserKeyRestrictions flattens an instance of KeyRestrictionsBrowserKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsBrowserKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsBrowserKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsBrowserKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsBrowserKeyRestrictions + } + r.AllowedReferrers = dcl.FlattenStringSlice(m["allowedReferrers"]) + + return r +} + +// expandKeyRestrictionsServerKeyRestrictionsMap expands the contents of KeyRestrictionsServerKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsServerKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsServerKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsServerKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsServerKeyRestrictionsSlice expands the contents of KeyRestrictionsServerKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsServerKeyRestrictionsSlice(c *Client, f []KeyRestrictionsServerKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsServerKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsServerKeyRestrictionsMap flattens the contents of KeyRestrictionsServerKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsServerKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsServerKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsServerKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsServerKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsServerKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsServerKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsServerKeyRestrictionsSlice flattens the contents of KeyRestrictionsServerKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsServerKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsServerKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsServerKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsServerKeyRestrictions{} + } + + items := make([]KeyRestrictionsServerKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsServerKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsServerKeyRestrictions expands an instance of KeyRestrictionsServerKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsServerKeyRestrictions(c *Client, f *KeyRestrictionsServerKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowedIps; v != nil { + m["allowedIps"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsServerKeyRestrictions flattens an instance of KeyRestrictionsServerKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsServerKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsServerKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsServerKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsServerKeyRestrictions + } + r.AllowedIps = dcl.FlattenStringSlice(m["allowedIps"]) + + return r +} + +// expandKeyRestrictionsAndroidKeyRestrictionsMap expands the contents of KeyRestrictionsAndroidKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsAndroidKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsAndroidKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsAndroidKeyRestrictionsSlice expands the contents of KeyRestrictionsAndroidKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictionsSlice(c *Client, f []KeyRestrictionsAndroidKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsMap flattens the contents of KeyRestrictionsAndroidKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsAndroidKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsAndroidKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsAndroidKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsAndroidKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsAndroidKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsSlice flattens the contents of KeyRestrictionsAndroidKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsAndroidKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsAndroidKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsAndroidKeyRestrictions{} + } + + items := make([]KeyRestrictionsAndroidKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsAndroidKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsAndroidKeyRestrictions expands an instance of KeyRestrictionsAndroidKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictions(c *Client, f *KeyRestrictionsAndroidKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c, f.AllowedApplications, res); err != nil { + return nil, fmt.Errorf("error expanding AllowedApplications into allowedApplications: %w", err) + } else if v != nil { + m["allowedApplications"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictions flattens an instance of KeyRestrictionsAndroidKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsAndroidKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsAndroidKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsAndroidKeyRestrictions + } + r.AllowedApplications = flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c, m["allowedApplications"], res) + + return r +} + +// expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap expands the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap(c *Client, f map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice expands the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications into a JSON +// request object. +func expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c *Client, f []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap flattens the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications from a JSON +// response object. 
+func flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + items := make(map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) + for k, item := range a { + items[k] = *flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice flattens the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + if len(a) == 0 { + return []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + items := make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications expands an instance of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c *Client, f *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Sha1Fingerprint; !dcl.IsEmptyValueIndirect(v) { + m["sha1Fingerprint"] = v + } + if v := f.PackageName; !dcl.IsEmptyValueIndirect(v) { + m["packageName"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications flattens an instance of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c *Client, i interface{}, res *Key) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications + } + r.Sha1Fingerprint = dcl.FlattenString(m["sha1Fingerprint"]) + r.PackageName = dcl.FlattenString(m["packageName"]) + + return r +} + +// expandKeyRestrictionsIosKeyRestrictionsMap expands the contents of KeyRestrictionsIosKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsIosKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsIosKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsIosKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsIosKeyRestrictionsSlice expands the contents of KeyRestrictionsIosKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsIosKeyRestrictionsSlice(c *Client, f []KeyRestrictionsIosKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsIosKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsIosKeyRestrictionsMap flattens the contents of KeyRestrictionsIosKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsIosKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsIosKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsIosKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsIosKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsIosKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsIosKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsIosKeyRestrictionsSlice flattens the contents of KeyRestrictionsIosKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsIosKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsIosKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsIosKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsIosKeyRestrictions{} + } + + items := make([]KeyRestrictionsIosKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsIosKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsIosKeyRestrictions expands an instance of KeyRestrictionsIosKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsIosKeyRestrictions(c *Client, f *KeyRestrictionsIosKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowedBundleIds; v != nil { + m["allowedBundleIds"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsIosKeyRestrictions flattens an instance of KeyRestrictionsIosKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsIosKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsIosKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsIosKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsIosKeyRestrictions + } + r.AllowedBundleIds = dcl.FlattenStringSlice(m["allowedBundleIds"]) + + return r +} + +// expandKeyRestrictionsApiTargetsMap expands the contents of KeyRestrictionsApiTargets into a JSON +// request object. +func expandKeyRestrictionsApiTargetsMap(c *Client, f map[string]KeyRestrictionsApiTargets, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsApiTargets(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsApiTargetsSlice expands the contents of KeyRestrictionsApiTargets into a JSON +// request object. 
+func expandKeyRestrictionsApiTargetsSlice(c *Client, f []KeyRestrictionsApiTargets, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsApiTargets(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsApiTargetsMap flattens the contents of KeyRestrictionsApiTargets from a JSON +// response object. +func flattenKeyRestrictionsApiTargetsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsApiTargets { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsApiTargets{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsApiTargets{} + } + + items := make(map[string]KeyRestrictionsApiTargets) + for k, item := range a { + items[k] = *flattenKeyRestrictionsApiTargets(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsApiTargetsSlice flattens the contents of KeyRestrictionsApiTargets from a JSON +// response object. +func flattenKeyRestrictionsApiTargetsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsApiTargets { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsApiTargets{} + } + + if len(a) == 0 { + return []KeyRestrictionsApiTargets{} + } + + items := make([]KeyRestrictionsApiTargets, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsApiTargets(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsApiTargets expands an instance of KeyRestrictionsApiTargets into a JSON +// request object. 
+func expandKeyRestrictionsApiTargets(c *Client, f *KeyRestrictionsApiTargets, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + if v := f.Methods; v != nil { + m["methods"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsApiTargets flattens an instance of KeyRestrictionsApiTargets from a JSON +// response object. +func flattenKeyRestrictionsApiTargets(c *Client, i interface{}, res *Key) *KeyRestrictionsApiTargets { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsApiTargets{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsApiTargets + } + r.Service = dcl.FlattenString(m["service"]) + r.Methods = dcl.FlattenStringSlice(m["methods"]) + + return r +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
func (r *Key) matcher(c *Client) func([]byte) bool {
	return func(b []byte) bool {
		cr, err := unmarshalKey(b, c, r)
		if err != nil {
			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
			return false
		}
		nr := r.urlNormalized()
		ncr := cr.urlNormalized()
		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)

		// Identity fields (Project, Name) are compared pointer-aware: both nil
		// counts as equal, exactly one nil as unequal.
		if nr.Project == nil && ncr.Project == nil {
			c.Config.Logger.Info("Both Project fields null - considering equal.")
		} else if nr.Project == nil || ncr.Project == nil {
			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
			return false
		} else if *nr.Project != *ncr.Project {
			return false
		}
		if nr.Name == nil && ncr.Name == nil {
			c.Config.Logger.Info("Both Name fields null - considering equal.")
		} else if nr.Name == nil || ncr.Name == nil {
			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
			return false
		} else if *nr.Name != *ncr.Name {
			return false
		}
		return true
	}
}

type keyDiff struct {
	// The diff should include one or the other of RequiresRecreate or UpdateOp.
	RequiresRecreate bool
	UpdateOp         keyApiOperation
	FieldName        string // used for error logging
}

// convertFieldDiffsToKeyDiffs groups field diffs by the operation each one
// triggers and converts every operation name into a keyDiff.
func convertFieldDiffsToKeyDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]keyDiff, error) {
	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
	// Map each operation name to the field diffs associated with it.
	for _, fd := range fds {
		for _, ro := range fd.ResultingOperation {
			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
				fieldDiffs = append(fieldDiffs, fd)
				opNamesToFieldDiffs[ro] = fieldDiffs
			} else {
				config.Logger.Infof("%s required due to diff: %v", ro, fd)
				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
			}
		}
	}
	var diffs []keyDiff
	// For each operation name, create a keyDiff which contains the operation.
	for opName, fieldDiffs := range opNamesToFieldDiffs {
		// Use the first field diff's field name for logging required recreate error.
		diff := keyDiff{FieldName: fieldDiffs[0].FieldName}
		if opName == "Recreate" {
			diff.RequiresRecreate = true
		} else {
			apiOp, err := convertOpNameToKeyApiOperation(opName, fieldDiffs, opts...)
			if err != nil {
				return diffs, err
			}
			diff.UpdateOp = apiOp
		}
		diffs = append(diffs, diff)
	}
	return diffs, nil
}

// convertOpNameToKeyApiOperation maps an operation name to its concrete
// operation value. opts is accepted for signature parity but is not used here.
func convertOpNameToKeyApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (keyApiOperation, error) {
	switch opName {

	case "updateKeyUpdateKeyOperation":
		return &updateKeyUpdateKeyOperation{FieldDiffs: fieldDiffs}, nil

	default:
		return nil, fmt.Errorf("no such operation with name: %v", opName)
	}
}

// extractKeyFields runs the generated extract hooks over a Key's nested
// objects, materializing Restrictions temporarily so its hook can run.
func extractKeyFields(r *Key) error {
	vRestrictions := r.Restrictions
	if vRestrictions == nil {
		// note: explicitly not the empty object.
		vRestrictions = &KeyRestrictions{}
	}
	if err := extractKeyRestrictionsFields(r, vRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vRestrictions) {
		r.Restrictions = vRestrictions
	}
	return nil
}

// extractKeyRestrictionsFields applies the extract hooks to each nested
// platform-restriction object, writing back only non-empty results.
func extractKeyRestrictionsFields(r *Key, o *KeyRestrictions) error {
	vBrowserKeyRestrictions := o.BrowserKeyRestrictions
	if vBrowserKeyRestrictions == nil {
		// note: explicitly not the empty object.
		vBrowserKeyRestrictions = &KeyRestrictionsBrowserKeyRestrictions{}
	}
	if err := extractKeyRestrictionsBrowserKeyRestrictionsFields(r, vBrowserKeyRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vBrowserKeyRestrictions) {
		o.BrowserKeyRestrictions = vBrowserKeyRestrictions
	}
	vServerKeyRestrictions := o.ServerKeyRestrictions
	if vServerKeyRestrictions == nil {
		// note: explicitly not the empty object.
		vServerKeyRestrictions = &KeyRestrictionsServerKeyRestrictions{}
	}
	if err := extractKeyRestrictionsServerKeyRestrictionsFields(r, vServerKeyRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vServerKeyRestrictions) {
		o.ServerKeyRestrictions = vServerKeyRestrictions
	}
	vAndroidKeyRestrictions := o.AndroidKeyRestrictions
	if vAndroidKeyRestrictions == nil {
		// note: explicitly not the empty object.
		vAndroidKeyRestrictions = &KeyRestrictionsAndroidKeyRestrictions{}
	}
	if err := extractKeyRestrictionsAndroidKeyRestrictionsFields(r, vAndroidKeyRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vAndroidKeyRestrictions) {
		o.AndroidKeyRestrictions = vAndroidKeyRestrictions
	}
	vIosKeyRestrictions := o.IosKeyRestrictions
	if vIosKeyRestrictions == nil {
		// note: explicitly not the empty object.
		vIosKeyRestrictions = &KeyRestrictionsIosKeyRestrictions{}
	}
	if err := extractKeyRestrictionsIosKeyRestrictionsFields(r, vIosKeyRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vIosKeyRestrictions) {
		o.IosKeyRestrictions = vIosKeyRestrictions
	}
	return nil
}

// The following extract hooks are no-ops for this resource; the generator
// emits them so resource-specific logic can be plugged in.
func extractKeyRestrictionsBrowserKeyRestrictionsFields(r *Key, o *KeyRestrictionsBrowserKeyRestrictions) error {
	return nil
}
func extractKeyRestrictionsServerKeyRestrictionsFields(r *Key, o *KeyRestrictionsServerKeyRestrictions) error {
	return nil
}
func extractKeyRestrictionsAndroidKeyRestrictionsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictions) error {
	return nil
}
func extractKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) error {
	return nil
}
func extractKeyRestrictionsIosKeyRestrictionsFields(r *Key, o *KeyRestrictionsIosKeyRestrictions) error {
	return nil
}
func extractKeyRestrictionsApiTargetsFields(r *Key, o *KeyRestrictionsApiTargets) error {
	return nil
}

// postReadExtractKeyFields mirrors extractKeyFields but runs after a read.
func postReadExtractKeyFields(r *Key) error {
	vRestrictions := r.Restrictions
	if vRestrictions == nil {
		// note: explicitly not the empty object.
		vRestrictions = &KeyRestrictions{}
	}
	if err := postReadExtractKeyRestrictionsFields(r, vRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vRestrictions) {
		r.Restrictions = vRestrictions
	}
	return nil
}

// postReadExtractKeyRestrictionsFields applies the (no-op) extract hooks to
// each nested restriction object after a read. (Definition continues past the
// end of this chunk.)
func postReadExtractKeyRestrictionsFields(r *Key, o *KeyRestrictions) error {
	vBrowserKeyRestrictions := o.BrowserKeyRestrictions
	if vBrowserKeyRestrictions == nil {
		// note: explicitly not the empty object.
		vBrowserKeyRestrictions = &KeyRestrictionsBrowserKeyRestrictions{}
	}
	if err := extractKeyRestrictionsBrowserKeyRestrictionsFields(r, vBrowserKeyRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vBrowserKeyRestrictions) {
		o.BrowserKeyRestrictions = vBrowserKeyRestrictions
	}
	vServerKeyRestrictions := o.ServerKeyRestrictions
	if vServerKeyRestrictions == nil {
		// note: explicitly not the empty object.
		vServerKeyRestrictions = &KeyRestrictionsServerKeyRestrictions{}
	}
	if err := extractKeyRestrictionsServerKeyRestrictionsFields(r, vServerKeyRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vServerKeyRestrictions) {
		o.ServerKeyRestrictions = vServerKeyRestrictions
	}
	vAndroidKeyRestrictions := o.AndroidKeyRestrictions
	if vAndroidKeyRestrictions == nil {
		// note: explicitly not the empty object.
		vAndroidKeyRestrictions = &KeyRestrictionsAndroidKeyRestrictions{}
	}
	if err := extractKeyRestrictionsAndroidKeyRestrictionsFields(r, vAndroidKeyRestrictions); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vAndroidKeyRestrictions) {
		o.AndroidKeyRestrictions = vAndroidKeyRestrictions
	}
	vIosKeyRestrictions := o.IosKeyRestrictions
	if vIosKeyRestrictions == nil {
		// note: explicitly not the empty object.
+ vIosKeyRestrictions = &KeyRestrictionsIosKeyRestrictions{} + } + if err := extractKeyRestrictionsIosKeyRestrictionsFields(r, vIosKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIosKeyRestrictions) { + o.IosKeyRestrictions = vIosKeyRestrictions + } + return nil +} +func postReadExtractKeyRestrictionsBrowserKeyRestrictionsFields(r *Key, o *KeyRestrictionsBrowserKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsServerKeyRestrictionsFields(r *Key, o *KeyRestrictionsServerKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsAndroidKeyRestrictionsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) error { + return nil +} +func postReadExtractKeyRestrictionsIosKeyRestrictionsFields(r *Key, o *KeyRestrictionsIosKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsApiTargetsFields(r *Key, o *KeyRestrictionsApiTargets) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/apikeys/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/apikeys/provider_dcl_client_creation.go new file mode 100644 index 000000000000..e67cfff3948d --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package apikeys + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLApikeysClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ApikeysBasePath), + } + + if 
timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key.go b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key.go new file mode 100644 index 000000000000..9360cf572789 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key.go @@ -0,0 +1,697 @@ +package apikeys + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApikeysKey() *schema.Resource { + return &schema.Resource{ + Create: resourceApikeysKeyCreate, + Read: resourceApikeysKeyRead, + Update: resourceApikeysKeyUpdate, + Delete: resourceApikeysKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApikeysKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The resource name of the key. 
The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Human-readable display name of this API key. Modifiable by user.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "Key restrictions.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsSchema(), + }, + + "service_account_email": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The email of the service account the key is bound to. If this field is specified, the key is a service account bound key and auth enabled. See [Documentation](https://cloud.google.com/docs/authentication/api-keys?#api-keys-bound-sa) for more details.", + }, + + "key_string": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "Output only. An encrypted and signed value held by this key. This field can be accessed only through the `GetKeyString` method.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Unique id in UUID4 format.", + }, + }, + } +} + +func ApikeysKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "android_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The Android apps that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsAndroidKeyRestrictionsSchema(), + }, + + "api_targets": { + Type: schema.TypeList, + Optional: true, + Description: "A restriction for a specific service and optionally one or more specific methods. Requests are allowed if they match any of these restrictions. If no restrictions are specified, all targets are allowed.", + Elem: ApikeysKeyRestrictionsApiTargetsSchema(), + }, + + "browser_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The HTTP referrers (websites) that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsBrowserKeyRestrictionsSchema(), + }, + + "ios_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The iOS apps that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsIosKeyRestrictionsSchema(), + }, + + "server_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The IP addresses of callers that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsServerKeyRestrictionsSchema(), + }, + }, + } +} + +func ApikeysKeyRestrictionsAndroidKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_applications": { + Type: schema.TypeList, + Required: true, + Description: "A list of Android applications that are allowed to make API calls with this key.", + Elem: ApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSchema(), + }, + }, + } +} + +func ApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSchema() *schema.Resource { + return &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "package_name": { + Type: schema.TypeString, + Required: true, + Description: "The package name of the application.", + }, + + "sha1_fingerprint": { + Type: schema.TypeString, + Required: true, + Description: "The SHA1 fingerprint of the application. For example, both sha1 formats are acceptable : DA:39:A3:EE:5E:6B:4B:0D:32:55:BF:EF:95:60:18:90:AF:D8:07:09 or DA39A3EE5E6B4B0D3255BFEF95601890AFD80709. Output format is the latter.", + }, + }, + } +} + +func ApikeysKeyRestrictionsApiTargetsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + Description: "The service for this restriction. It should be the canonical service name, for example: `translate.googleapis.com`. You can use `gcloud services list` to get a list of services that are enabled in the project.", + }, + + "methods": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. List of one or more methods that can be called. If empty, all methods for the service are allowed. A wildcard (*) can be used as the last symbol. 
Valid examples: `google.cloud.translate.v2.TranslateService.GetSupportedLanguage` `TranslateText` `Get*` `translate.googleapis.com.Get*`", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ApikeysKeyRestrictionsBrowserKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_referrers": { + Type: schema.TypeList, + Required: true, + Description: "A list of regular expressions for the referrer URLs that are allowed to make API calls with this key.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ApikeysKeyRestrictionsIosKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_bundle_ids": { + Type: schema.TypeList, + Required: true, + Description: "A list of bundle IDs that are allowed when making API calls with this key.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ApikeysKeyRestrictionsServerKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_ips": { + Type: schema.TypeList, + Required: true, + Description: "A list of the caller IP addresses that are allowed to make API calls with this key.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Project: dcl.String(project), + Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), + ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := 
tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Key: %s", err) + } + + log.Printf("[DEBUG] Finished creating Key %q: %#v", d.Id(), res) + + return resourceApikeysKeyRead(d, meta) +} + +func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Project: dcl.String(project), + Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), + ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLApikeysClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetKey(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ApikeysKey %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("restrictions", flattenApikeysKeyRestrictions(res.Restrictions)); err != nil { + return fmt.Errorf("error setting restrictions in state: %s", err) + } + if err = d.Set("service_account_email", res.ServiceAccountEmail); err != nil { + return fmt.Errorf("error setting service_account_email in state: %s", err) + } + if err = d.Set("key_string", res.KeyString); err != nil { + return fmt.Errorf("error setting key_string in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + + return nil +} +func resourceApikeysKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Project: dcl.String(project), + Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), + ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+	client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate))
+	if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil {
+		d.SetId("")
+		return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err)
+	} else {
+		client.Config.BasePath = bp
+	}
+	res, err := client.ApplyKey(context.Background(), obj, directive...)
+
+	if _, ok := err.(dcl.DiffAfterApplyError); ok {
+		log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err)
+	} else if err != nil {
+		// The resource didn't actually update
+		d.SetId("")
+		return fmt.Errorf("Error updating Key: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished updating Key %q: %#v", d.Id(), res)
+
+	return resourceApikeysKeyRead(d, meta)
+}
+
+func resourceApikeysKeyDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	obj := &Key{
+		Name:                dcl.String(d.Get("name").(string)),
+		DisplayName:         dcl.String(d.Get("display_name").(string)),
+		Project:             dcl.String(project),
+		Restrictions:        expandApikeysKeyRestrictions(d.Get("restrictions")),
+		ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)),
+	}
+
+	log.Printf("[DEBUG] Deleting Key %q", d.Id())
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+	billingProject := project
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+	client := NewDCLApikeysClient(config, userAgent, billingProject,
d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteKey(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Key: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Key %q", d.Id()) + return nil +} + +func resourceApikeysKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/keys/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/global/keys/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandApikeysKeyRestrictions(o interface{}) *KeyRestrictions { + if o == nil { + return EmptyKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictions{ + AndroidKeyRestrictions: expandApikeysKeyRestrictionsAndroidKeyRestrictions(obj["android_key_restrictions"]), + ApiTargets: expandApikeysKeyRestrictionsApiTargetsArray(obj["api_targets"]), + BrowserKeyRestrictions: expandApikeysKeyRestrictionsBrowserKeyRestrictions(obj["browser_key_restrictions"]), + IosKeyRestrictions: expandApikeysKeyRestrictionsIosKeyRestrictions(obj["ios_key_restrictions"]), + ServerKeyRestrictions: expandApikeysKeyRestrictionsServerKeyRestrictions(obj["server_key_restrictions"]), + } +} + +func flattenApikeysKeyRestrictions(obj *KeyRestrictions) interface{} { + if obj == 
nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "android_key_restrictions": flattenApikeysKeyRestrictionsAndroidKeyRestrictions(obj.AndroidKeyRestrictions), + "api_targets": flattenApikeysKeyRestrictionsApiTargetsArray(obj.ApiTargets), + "browser_key_restrictions": flattenApikeysKeyRestrictionsBrowserKeyRestrictions(obj.BrowserKeyRestrictions), + "ios_key_restrictions": flattenApikeysKeyRestrictionsIosKeyRestrictions(obj.IosKeyRestrictions), + "server_key_restrictions": flattenApikeysKeyRestrictionsServerKeyRestrictions(obj.ServerKeyRestrictions), + } + + return []interface{}{transformed} + +} + +func expandApikeysKeyRestrictionsAndroidKeyRestrictions(o interface{}) *KeyRestrictionsAndroidKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsAndroidKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictionsAndroidKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsAndroidKeyRestrictions{ + AllowedApplications: expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(obj["allowed_applications"]), + } +} + +func flattenApikeysKeyRestrictionsAndroidKeyRestrictions(obj *KeyRestrictionsAndroidKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_applications": flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(obj.AllowedApplications), + } + + return []interface{}{transformed} + +} +func expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(o interface{}) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if o == nil { + return make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0) + } + + items := 
make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(objs)) + for _, item := range objs { + i := expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(item) + items = append(items, *i) + } + + return items +} + +func expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(o interface{}) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if o == nil { + return EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications + } + + obj := o.(map[string]interface{}) + return &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{ + PackageName: dcl.String(obj["package_name"].(string)), + Sha1Fingerprint: dcl.String(obj["sha1_fingerprint"].(string)), + } +} + +func flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(objs []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(&item) + items = append(items, i) + } + + return items +} + +func flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(obj *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "package_name": obj.PackageName, + "sha1_fingerprint": obj.Sha1Fingerprint, + } + + return transformed + +} +func expandApikeysKeyRestrictionsApiTargetsArray(o interface{}) []KeyRestrictionsApiTargets { + if o == nil { + return make([]KeyRestrictionsApiTargets, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]KeyRestrictionsApiTargets, 0) + } + + items := make([]KeyRestrictionsApiTargets, 0, len(objs)) + for _, item := range objs { + i := expandApikeysKeyRestrictionsApiTargets(item) + items = append(items, *i) + } + + return items +} + +func 
expandApikeysKeyRestrictionsApiTargets(o interface{}) *KeyRestrictionsApiTargets { + if o == nil { + return EmptyKeyRestrictionsApiTargets + } + + obj := o.(map[string]interface{}) + return &KeyRestrictionsApiTargets{ + Service: dcl.String(obj["service"].(string)), + Methods: tpgdclresource.ExpandStringArray(obj["methods"]), + } +} + +func flattenApikeysKeyRestrictionsApiTargetsArray(objs []KeyRestrictionsApiTargets) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenApikeysKeyRestrictionsApiTargets(&item) + items = append(items, i) + } + + return items +} + +func flattenApikeysKeyRestrictionsApiTargets(obj *KeyRestrictionsApiTargets) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "service": obj.Service, + "methods": obj.Methods, + } + + return transformed + +} + +func expandApikeysKeyRestrictionsBrowserKeyRestrictions(o interface{}) *KeyRestrictionsBrowserKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsBrowserKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictionsBrowserKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsBrowserKeyRestrictions{ + AllowedReferrers: tpgdclresource.ExpandStringArray(obj["allowed_referrers"]), + } +} + +func flattenApikeysKeyRestrictionsBrowserKeyRestrictions(obj *KeyRestrictionsBrowserKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_referrers": obj.AllowedReferrers, + } + + return []interface{}{transformed} + +} + +func expandApikeysKeyRestrictionsIosKeyRestrictions(o interface{}) *KeyRestrictionsIosKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsIosKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return 
EmptyKeyRestrictionsIosKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsIosKeyRestrictions{ + AllowedBundleIds: tpgdclresource.ExpandStringArray(obj["allowed_bundle_ids"]), + } +} + +func flattenApikeysKeyRestrictionsIosKeyRestrictions(obj *KeyRestrictionsIosKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_bundle_ids": obj.AllowedBundleIds, + } + + return []interface{}{transformed} + +} + +func expandApikeysKeyRestrictionsServerKeyRestrictions(o interface{}) *KeyRestrictionsServerKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsServerKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictionsServerKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsServerKeyRestrictions{ + AllowedIps: tpgdclresource.ExpandStringArray(obj["allowed_ips"]), + } +} + +func flattenApikeysKeyRestrictionsServerKeyRestrictions(obj *KeyRestrictionsServerKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_ips": obj.AllowedIps, + } + + return []interface{}{transformed} + +} diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go new file mode 100644 index 000000000000..e92d63798310 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go @@ -0,0 +1,452 @@ +package apikeys_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + 
"github.com/hashicorp/terraform-provider-google/google/services/apikeys" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccApikeysKey_AndroidKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_AndroidKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApikeysKey_AndroidKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_BasicKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_BasicKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApikeysKey_BasicKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_IosKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + 
"org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_IosKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApikeysKey_IosKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_MinimalKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_MinimalKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_ServerKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_ServerKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + 
}, + { + Config: testAccApikeysKey_ServerKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_ServiceAccountKeyHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_ServiceAccountKeyHandWritten(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccApikeysKey_AndroidKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "com.example.app123" + sha1_fingerprint = "1699466a142d4682a5f91b50fdf400f2358e2b0b" + } + } + + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_AndroidKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "com.example.app124" + sha1_fingerprint = "1cf89aa28625da86a7e5a7550cf7fd33d611f6fd" + } + } + + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + } +} + + +`, context) +} + +func 
testAccApikeysKey_BasicKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + browser_key_restrictions { + allowed_referrers = [".*"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_BasicKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key-update" + + restrictions { + api_targets { + service = "maps.googleapis.com" + methods = ["POST*"] + } + + browser_key_restrictions { + allowed_referrers = [".*com"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_IosKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + ios_key_restrictions { + allowed_bundle_ids = ["com.google.app.macos"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_IosKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + ios_key_restrictions { + allowed_bundle_ids = ["com.google.alex.ios"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_MinimalKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" +} + + +`, context) +} + +func testAccApikeysKey_ServerKey(context map[string]interface{}) string { + return 
acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + server_key_restrictions { + allowed_ips = ["127.0.0.1"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_ServerKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + server_key_restrictions { + allowed_ips = ["127.0.0.2", "192.168.1.1"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_ServiceAccountKeyHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + project = google_project.project.project_id + service_account_email = google_service_account.key_service_account.email +} + +resource "google_project" "project" { + project_id = "tf-test-app%{random_suffix}" + name = "tf-test-app%{random_suffix}" + org_id = "%{org_id}" + deletion_policy = "DELETE" +} + +resource "google_service_account" "key_service_account" { + account_id = "tf-test-app%{random_suffix}" + project = google_project.project.project_id + display_name = "Test Service Account" +} +`, context) +} + +func testAccCheckApikeysKeyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_apikeys_key" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &apikeys.Key{ + Name: 
dcl.String(rs.Primary.Attributes["name"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + ServiceAccountEmail: dcl.String(rs.Primary.Attributes["service_account_email"]), + KeyString: dcl.StringOrNil(rs.Primary.Attributes["key_string"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + } + + client := apikeys.NewDCLApikeysClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetKey(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_apikeys_key still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml index 3e3bfe25f6be..e378775095be 100644 --- a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_apikeys_key' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'apikeys.googleapis.com' api_version: 'v2' api_resource_type_kind: 'Key' diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_sweeper.go b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_sweeper.go new file mode 100644 index 000000000000..87ab4feb93c4 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_sweeper.go @@ -0,0 +1,53 @@ +package apikeys + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("ApikeysKey", testSweepApikeysKey) +} + +func testSweepApikeysKey(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ApikeysKey") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { 
+ log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLApikeysClient(config, config.UserAgent, "", 0) + err = client.DeleteAllKey(context.Background(), d["project"], isDeletableApikeysKey) + if err != nil { + return err + } + return nil +} + +func isDeletableApikeysKey(r *Key) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go b/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go new file mode 100644 index 000000000000..d57fc332293e --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go @@ -0,0 +1,137 @@ +package assuredworkloads + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "regexp" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +var universeDomainRegex = regexp.MustCompile(`https://[^/.]+([^/]+)/`) + +// Returns the URL of the project resource with the given index in the workload. +func (r *Workload) projectURL(userBasePath string, index int) (string, error) { + params := map[string]any{ + "project": dcl.ValueOrEmptyString(r.Resources[index].ResourceId), + } + // This is a hack to support universe domains & custom endpoints. This should really be + // handled by using a properly-configured cloud resource manager client to delete the + // project, but that's not available in this context. We will want to fix this when migrating + // to MMv1. 
+ if userBasePath != "" { + matches := universeDomainRegex.FindStringSubmatch(userBasePath) + if len(matches) > 0 { + userBasePath = fmt.Sprintf("https://cloudresourcemanager%s/v1/", matches[1]) + } + } + return dcl.URL("projects/{{project}}", "https://cloudresourcemanager.googleapis.com/v1/", userBasePath, params), nil +} + +// Returns the URL of the folder resource with the given index in the workload. +func (r *Workload) folderURL(userBasePath string, index int) (string, error) { + params := map[string]any{ + "folder": dcl.ValueOrEmptyString(r.Resources[index].ResourceId), + } + // This is a hack to support universe domains & custom endpoints. This should really be + // handled by using a properly-configured cloud resource manager client to delete the + // folder, but that's not available in this context. We will want to fix this when migrating + // to MMv1. + if userBasePath != "" { + matches := universeDomainRegex.FindStringSubmatch(userBasePath) + if len(matches) > 0 { + userBasePath = fmt.Sprintf("https://cloudresourcemanager%s/v2/", matches[1]) + } + } + return dcl.URL("folders/{{folder}}", "https://cloudresourcemanager.googleapis.com/v2/", userBasePath, params), nil +} + +// Returns the lifecycle state of the project or folder resource with the given url. +func lifecycleState(ctx context.Context, client *Client, url string) (string, error) { + resp, err := dcl.SendRequest(ctx, client.Config, "GET", url, &bytes.Buffer{}, client.Config.RetryProvider) + if err != nil { + return "", err + } + defer resp.Response.Body.Close() + b, err := io.ReadAll(resp.Response.Body) + if err != nil { + return "", err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return "", err + } + state, ok := m["lifecycleState"].(string) + if !ok { + return "", fmt.Errorf("no lifecycle state for resource at %q", url) + } + return state, nil +} + +// Deletes the resource with the given URL. 
Returns true if it is already in DELETE_REQUESTED state, +// otherwise returns false. +func deleteResource(ctx context.Context, client *Client, url string) (bool, error) { + state, err := lifecycleState(ctx, client, url) + if err != nil { + return false, err + } + if state == "DELETE_REQUESTED" { + // Do not delete an already deleted resource. + return true, nil + } + // Send delete request for resources not already deleted. + _, err = dcl.SendRequest(ctx, client.Config, "DELETE", url, &bytes.Buffer{}, client.Config.RetryProvider) + if err != nil { + return false, fmt.Errorf("failed to delete resource at %s: %w", url, err) + } + return false, nil +} + +// Deletes projects and folders owned by the workload prior to workload deletion. +func (r *Workload) deleteResources(ctx context.Context, client *Client) error { + nr := r.urlNormalized() + return dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + // First, delete projects + for i, resource := range nr.Resources { + if resource.ResourceType == nil { + return nil, fmt.Errorf("nil resource type in workload %q", dcl.ValueOrEmptyString(nr.Name)) + } + if *resource.ResourceType == WorkloadResourcesResourceTypeEnum("CONSUMER_PROJECT") || *resource.ResourceType == WorkloadResourcesResourceTypeEnum("ENCRYPTION_KEYS_PROJECT") { + u, err := nr.projectURL(client.Config.BasePath, i) + if err != nil { + return nil, err + } + deleted, err := deleteResource(ctx, client, u) + if err != nil { + return nil, err + } + if !deleted { + // Retry until all resources are being deleted. 
+ return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + } + } + // Then, delete folders + for i, resource := range nr.Resources { + if *resource.ResourceType == WorkloadResourcesResourceTypeEnum("CONSUMER_FOLDER") { + u, err := nr.folderURL(client.Config.BasePath, i) + if err != nil { + return nil, err + } + deleted, err := deleteResource(ctx, client, u) + if err != nil { + return nil, err + } + if !deleted { + // Retry until all resources are being deleted. + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + } + } + // All project and folder resources are in DELETE_REQUESTED state. + return nil, nil + }, client.Config.RetryProvider) +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/client.go b/mmv1/third_party/terraform/services/assuredworkloads/client.go new file mode 100644 index 000000000000..6f3d2273c526 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/client.go @@ -0,0 +1,18 @@ +package assuredworkloads + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/assuredworkloads/provider_dcl_client_creation.go new file mode 100644 index 000000000000..e240c1b3e039 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package assuredworkloads + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLAssuredWorkloadsClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.AssuredWorkloadsBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl new file mode 100644 index 000000000000..ba06bc56c6d5 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl @@ -0,0 +1,971 @@ +package assuredworkloads + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAssuredWorkloadsWorkload() *schema.Resource { + return &schema.Resource{ + Create: resourceAssuredWorkloadsWorkloadCreate, + Read: resourceAssuredWorkloadsWorkloadRead, + Update: resourceAssuredWorkloadsWorkloadUpdate, + Delete: resourceAssuredWorkloadsWorkloadDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAssuredWorkloadsWorkloadImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "compliance_regime": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT, IRS_1075", + }, + + "display_name": { + Type: schema.TypeString, + Required: true, + Description: "Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The organization for the resource", + }, + + "billing_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. 
For example, `billingAccounts/012345-567890-ABCDEF`.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "enable_sovereign_controls": { + Type: schema.TypeBool, +{{- if ne $.TargetVersionName "ga" }} + Computed: true, +{{- end }} + Optional: true, + ForceNew: true, + Description: "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.", + }, + + "kms_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "**DEPRECATED** Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is deprecated as of Feb 28, 2022. In order to create a Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type field.", + MaxItems: 1, + Elem: AssuredWorkloadsWorkloadKmsSettingsSchema(), + }, + + "partner": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", + }, + + "partner_permissions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Permissions granted to the AW Partner SA account for the customer workload", + MaxItems: 1, + Elem: AssuredWorkloadsWorkloadPartnerPermissionsSchema(), + }, + + "partner_services_billing_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. 
This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.", + }, + + "provisioned_resources_parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id}", + }, + + "resource_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional.", + Elem: AssuredWorkloadsWorkloadResourceSettingsSchema(), + }, + + "violation_notifications_enabled": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload.", + }, + + "workload_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.", + MaxItems: 1, + Elem: AssuredWorkloadsWorkloadWorkloadOptionsSchema(), + }, + + "compliance_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Count of active Violations in the Workload.", + Elem: AssuredWorkloadsWorkloadComplianceStatusSchema(), + }, + + "compliant_but_disallowed_services": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Immutable. The Workload creation timestamp.", + }, + + "ekm_provisioning_response": { + Type: schema.TypeList, + Computed: true, + Description: "Optional. Represents the Ekm Provisioning State of the given workload.", + Elem: AssuredWorkloadsWorkloadEkmProvisioningResponseSchema(), + }, + + "kaj_enrollment_state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Represents the KAJ enrollment state of the given workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING, KAJ_ENROLLMENT_STATE_COMPLETE", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Labels applied to the workload.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The resource name of the workload.", + }, + + "resources": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. 
Always read only.", + Elem: AssuredWorkloadsWorkloadResourcesSchema(), + }, + + "saa_enrollment_response": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Represents the SAA enrollment response of the given workload. SAA enrollment response is queried during workloads.get call. In failure cases, user friendly error message is shown in SAA details page.", + Elem: AssuredWorkloadsWorkloadSaaEnrollmentResponseSchema(), + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + }, + } +} + +func AssuredWorkloadsWorkloadKmsSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "next_rotation_time": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.", + }, + + "rotation_period": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Input only. Immutable. will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.", + }, + }, + } +} + +func AssuredWorkloadsWorkloadPartnerPermissionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "assured_workloads_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Allow partner to view violation alerts.", + }, + + "data_logs_viewer": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Allow the partner to view inspectability logs and monitoring violations.", + }, + + "service_access_approver": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. 
Allow partner to view access approval logs.", + }, + }, + } +} + +func AssuredWorkloadsWorkloadResourceSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "User-assigned resource display name. If not empty it will be used to create a resource with the specified name.", + }, + + "resource_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Resource identifier. For a project this represents projectId. If the project is already taken, the workload creation will fail. For KeyRing, this represents the keyring_id. For a folder, don't set this value as folder_id is assigned by Google.", + }, + + "resource_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", + }, + }, + } +} + +func AssuredWorkloadsWorkloadWorkloadOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kaj_enrollment_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Indicates type of KAJ enrollment for the workload. Currently, only specifiying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. 
Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF", + }, + }, + } +} + +func AssuredWorkloadsWorkloadComplianceStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "acknowledged_violation_count": { + Type: schema.TypeList, + Computed: true, + Description: "Number of current orgPolicy violations which are acknowledged.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "active_violation_count": { + Type: schema.TypeList, + Computed: true, + Description: "Number of current orgPolicy violations which are not acknowledged.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func AssuredWorkloadsWorkloadEkmProvisioningResponseSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ekm_provisioning_error_domain": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates Ekm provisioning error if any. Possible values: EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED, UNSPECIFIED_ERROR, GOOGLE_SERVER_ERROR, EXTERNAL_USER_ERROR, EXTERNAL_PARTNER_ERROR, TIMEOUT_ERROR", + }, + + "ekm_provisioning_error_mapping": { + Type: schema.TypeString, + Computed: true, + Description: "Detailed error message if Ekm provisioning fails Possible values: EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED, INVALID_SERVICE_ACCOUNT, MISSING_METRICS_SCOPE_ADMIN_PERMISSION, MISSING_EKM_CONNECTION_ADMIN_PERMISSION", + }, + + "ekm_provisioning_state": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates Ekm enrollment Provisioning of a given workload. 
Possible values: EKM_PROVISIONING_STATE_UNSPECIFIED, EKM_PROVISIONING_STATE_PENDING, EKM_PROVISIONING_STATE_FAILED, EKM_PROVISIONING_STATE_COMPLETED", + }, + }, + } +} + +func AssuredWorkloadsWorkloadResourcesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Resource identifier. For a project this represents project_number.", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", + }, + }, + } +} + +func AssuredWorkloadsWorkloadSaaEnrollmentResponseSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "setup_errors": { + Type: schema.TypeList, + Computed: true, + Description: "Indicates SAA enrollment setup error if any.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "setup_status": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates SAA enrollment status of a given workload. 
Possible values: SETUP_STATE_UNSPECIFIED, STATUS_PENDING, STATUS_COMPLETE", + }, + }, + } +} + +func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + BillingAccount: dcl.String(d.Get("billing_account").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EnableSovereignControls: dcl.Bool(d.Get("enable_sovereign_controls").(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Partner: WorkloadPartnerEnumRef(d.Get("partner").(string)), + PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkload(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Workload: %s", err) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. + + id, err = res.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Workload %q: %#v", d.Id(), res) + + return resourceAssuredWorkloadsWorkloadRead(d, meta) +} + +func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + BillingAccount: dcl.String(d.Get("billing_account").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EnableSovereignControls: dcl.Bool(d.Get("enable_sovereign_controls").(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Partner: WorkloadPartnerEnumRef(d.Get("partner").(string)), + PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), + 
ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkload(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("AssuredWorkloadsWorkload %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("compliance_regime", res.ComplianceRegime); err != nil { + return fmt.Errorf("error setting compliance_regime in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("organization", res.Organization); err != nil { + return fmt.Errorf("error setting organization in state: %s", err) + } + if err = d.Set("billing_account", res.BillingAccount); err != nil { + return fmt.Errorf("error setting billing_account in state: %s", err) + } + if err = 
d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("enable_sovereign_controls", res.EnableSovereignControls); err != nil { + return fmt.Errorf("error setting enable_sovereign_controls in state: %s", err) + } + if err = d.Set("kms_settings", flattenAssuredWorkloadsWorkloadKmsSettings(res.KmsSettings)); err != nil { + return fmt.Errorf("error setting kms_settings in state: %s", err) + } + if err = d.Set("partner", res.Partner); err != nil { + return fmt.Errorf("error setting partner in state: %s", err) + } + if err = d.Set("partner_permissions", flattenAssuredWorkloadsWorkloadPartnerPermissions(res.PartnerPermissions)); err != nil { + return fmt.Errorf("error setting partner_permissions in state: %s", err) + } + if err = d.Set("partner_services_billing_account", res.PartnerServicesBillingAccount); err != nil { + return fmt.Errorf("error setting partner_services_billing_account in state: %s", err) + } + if err = d.Set("provisioned_resources_parent", res.ProvisionedResourcesParent); err != nil { + return fmt.Errorf("error setting provisioned_resources_parent in state: %s", err) + } + if err = d.Set("resource_settings", flattenAssuredWorkloadsWorkloadResourceSettingsArray(res.ResourceSettings)); err != nil { + return fmt.Errorf("error setting resource_settings in state: %s", err) + } + if err = d.Set("violation_notifications_enabled", res.ViolationNotificationsEnabled); err != nil { + return fmt.Errorf("error setting violation_notifications_enabled in state: %s", err) + } + if err = d.Set("workload_options", flattenAssuredWorkloadsWorkloadWorkloadOptions(res.WorkloadOptions)); err != nil { + return fmt.Errorf("error setting workload_options in state: %s", err) + } + if err = d.Set("compliance_status", flattenAssuredWorkloadsWorkloadComplianceStatus(res.ComplianceStatus)); err != nil { + return fmt.Errorf("error setting compliance_status in state: %s", err) + } + if err = 
d.Set("compliant_but_disallowed_services", res.CompliantButDisallowedServices); err != nil { + return fmt.Errorf("error setting compliant_but_disallowed_services in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("ekm_provisioning_response", flattenAssuredWorkloadsWorkloadEkmProvisioningResponse(res.EkmProvisioningResponse)); err != nil { + return fmt.Errorf("error setting ekm_provisioning_response in state: %s", err) + } + if err = d.Set("kaj_enrollment_state", res.KajEnrollmentState); err != nil { + return fmt.Errorf("error setting kaj_enrollment_state in state: %s", err) + } + if err = d.Set("labels", flattenAssuredWorkloadsWorkloadLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resources", flattenAssuredWorkloadsWorkloadResourcesArray(res.Resources)); err != nil { + return fmt.Errorf("error setting resources in state: %s", err) + } + if err = d.Set("saa_enrollment_response", flattenAssuredWorkloadsWorkloadSaaEnrollmentResponse(res.SaaEnrollmentResponse)); err != nil { + return fmt.Errorf("error setting saa_enrollment_response in state: %s", err) + } + if err = d.Set("terraform_labels", flattenAssuredWorkloadsWorkloadTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + + return nil +} +func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: 
dcl.String(d.Get("organization").(string)), + BillingAccount: dcl.String(d.Get("billing_account").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EnableSovereignControls: dcl.Bool(d.Get("enable_sovereign_controls").(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Partner: WorkloadPartnerEnumRef(d.Get("partner").(string)), + PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + // Construct state hint from old values + old := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(dcl.OldValue(d.GetChange("compliance_regime")).(string)), + DisplayName: dcl.String(dcl.OldValue(d.GetChange("display_name")).(string)), + Location: dcl.String(dcl.OldValue(d.GetChange("location")).(string)), + Organization: dcl.String(dcl.OldValue(d.GetChange("organization")).(string)), + BillingAccount: dcl.String(dcl.OldValue(d.GetChange("billing_account")).(string)), + Labels: tpgresource.CheckStringMap(dcl.OldValue(d.GetChange("effective_labels"))), + EnableSovereignControls: dcl.Bool(dcl.OldValue(d.GetChange("enable_sovereign_controls")).(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(dcl.OldValue(d.GetChange("kms_settings"))), + Partner: WorkloadPartnerEnumRef(dcl.OldValue(d.GetChange("partner")).(string)), + PartnerPermissions: 
expandAssuredWorkloadsWorkloadPartnerPermissions(dcl.OldValue(d.GetChange("partner_permissions"))), + PartnerServicesBillingAccount: dcl.String(dcl.OldValue(d.GetChange("partner_services_billing_account")).(string)), + ProvisionedResourcesParent: dcl.String(dcl.OldValue(d.GetChange("provisioned_resources_parent")).(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(dcl.OldValue(d.GetChange("resource_settings"))), + ViolationNotificationsEnabled: dcl.Bool(dcl.OldValue(d.GetChange("violation_notifications_enabled")).(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(dcl.OldValue(d.GetChange("workload_options"))), + Name: dcl.StringOrNil(dcl.OldValue(d.GetChange("name")).(string)), + } + directive := dcl.UpdateDirective + directive = append(directive, dcl.WithStateHint(old)) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkload(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Workload: %s", err) + } + + log.Printf("[DEBUG] Finished creating Workload %q: %#v", d.Id(), res) + + return resourceAssuredWorkloadsWorkloadRead(d, meta) +} + +func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + BillingAccount: dcl.String(d.Get("billing_account").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EnableSovereignControls: dcl.Bool(d.Get("enable_sovereign_controls").(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Partner: WorkloadPartnerEnumRef(d.Get("partner").(string)), + PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + log.Printf("[DEBUG] Deleting Workload %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err 
== nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkload(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Workload: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Workload %q", d.Id()) + return nil +} + +func resourceAssuredWorkloadsWorkloadImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P[^/]+)/locations/(?P[^/]+)/workloads/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandAssuredWorkloadsWorkloadKmsSettings(o interface{}) *WorkloadKmsSettings { + if o == nil { + return EmptyWorkloadKmsSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkloadKmsSettings + } + obj := objArr[0].(map[string]interface{}) + return &WorkloadKmsSettings{ + NextRotationTime: dcl.String(obj["next_rotation_time"].(string)), + RotationPeriod: dcl.String(obj["rotation_period"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadKmsSettings(obj *WorkloadKmsSettings) interface{} { + if 
obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "next_rotation_time": obj.NextRotationTime, + "rotation_period": obj.RotationPeriod, + } + + return []interface{}{transformed} + +} + +func expandAssuredWorkloadsWorkloadPartnerPermissions(o interface{}) *WorkloadPartnerPermissions { + if o == nil { + return EmptyWorkloadPartnerPermissions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkloadPartnerPermissions + } + obj := objArr[0].(map[string]interface{}) + return &WorkloadPartnerPermissions{ + AssuredWorkloadsMonitoring: dcl.Bool(obj["assured_workloads_monitoring"].(bool)), + DataLogsViewer: dcl.Bool(obj["data_logs_viewer"].(bool)), + ServiceAccessApprover: dcl.Bool(obj["service_access_approver"].(bool)), + } +} + +func flattenAssuredWorkloadsWorkloadPartnerPermissions(obj *WorkloadPartnerPermissions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "assured_workloads_monitoring": obj.AssuredWorkloadsMonitoring, + "data_logs_viewer": obj.DataLogsViewer, + "service_access_approver": obj.ServiceAccessApprover, + } + + return []interface{}{transformed} + +} +func expandAssuredWorkloadsWorkloadResourceSettingsArray(o interface{}) []WorkloadResourceSettings { + if o == nil { + return make([]WorkloadResourceSettings, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]WorkloadResourceSettings, 0) + } + + items := make([]WorkloadResourceSettings, 0, len(objs)) + for _, item := range objs { + i := expandAssuredWorkloadsWorkloadResourceSettings(item) + items = append(items, *i) + } + + return items +} + +func expandAssuredWorkloadsWorkloadResourceSettings(o interface{}) *WorkloadResourceSettings { + if o == nil { + return EmptyWorkloadResourceSettings + } + + obj := o.(map[string]interface{}) + return &WorkloadResourceSettings{ + DisplayName: dcl.String(obj["display_name"].(string)), + 
ResourceId: dcl.String(obj["resource_id"].(string)), + ResourceType: WorkloadResourceSettingsResourceTypeEnumRef(obj["resource_type"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadResourceSettingsArray(objs []WorkloadResourceSettings) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenAssuredWorkloadsWorkloadResourceSettings(&item) + items = append(items, i) + } + + return items +} + +func flattenAssuredWorkloadsWorkloadResourceSettings(obj *WorkloadResourceSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "display_name": obj.DisplayName, + "resource_id": obj.ResourceId, + "resource_type": obj.ResourceType, + } + + return transformed + +} + +func expandAssuredWorkloadsWorkloadWorkloadOptions(o interface{}) *WorkloadWorkloadOptions { + if o == nil { + return EmptyWorkloadWorkloadOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkloadWorkloadOptions + } + obj := objArr[0].(map[string]interface{}) + return &WorkloadWorkloadOptions{ + KajEnrollmentType: WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(obj["kaj_enrollment_type"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadWorkloadOptions(obj *WorkloadWorkloadOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kaj_enrollment_type": obj.KajEnrollmentType, + } + + return []interface{}{transformed} + +} + +func flattenAssuredWorkloadsWorkloadComplianceStatus(obj *WorkloadComplianceStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "acknowledged_violation_count": obj.AcknowledgedViolationCount, + "active_violation_count": obj.ActiveViolationCount, + } + + return []interface{}{transformed} + +} + +func flattenAssuredWorkloadsWorkloadEkmProvisioningResponse(obj 
*WorkloadEkmProvisioningResponse) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ekm_provisioning_error_domain": obj.EkmProvisioningErrorDomain, + "ekm_provisioning_error_mapping": obj.EkmProvisioningErrorMapping, + "ekm_provisioning_state": obj.EkmProvisioningState, + } + + return []interface{}{transformed} + +} + +func flattenAssuredWorkloadsWorkloadResourcesArray(objs []WorkloadResources) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenAssuredWorkloadsWorkloadResources(&item) + items = append(items, i) + } + + return items +} + +func flattenAssuredWorkloadsWorkloadResources(obj *WorkloadResources) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_id": obj.ResourceId, + "resource_type": obj.ResourceType, + } + + return transformed + +} + +func flattenAssuredWorkloadsWorkloadSaaEnrollmentResponse(obj *WorkloadSaaEnrollmentResponse) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "setup_errors": flattenAssuredWorkloadsWorkloadSaaEnrollmentResponseSetupErrorsArray(obj.SetupErrors), + "setup_status": obj.SetupStatus, + } + + return []interface{}{transformed} + +} + +func flattenAssuredWorkloadsWorkloadLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenAssuredWorkloadsWorkloadTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return 
transformed +} + +func flattenAssuredWorkloadsWorkloadSaaEnrollmentResponseSetupErrorsArray(obj []WorkloadSaaEnrollmentResponseSetupErrorsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl new file mode 100644 index 000000000000..5bc2a6dcf258 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl @@ -0,0 +1,392 @@ +package assuredworkloads_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +{{- if ne $.TargetVersionName "ga" }} +func TestAccAssuredWorkloadsWorkload_SovereignControlsWorkload(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_SovereignControlsWorkload(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", 
+ ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + { + Config: testAccAssuredWorkloadsWorkload_SovereignControlsWorkloadUpdate0(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccAssuredWorkloadsWorkload_SplitBillingPartnerWorkload(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_SplitBillingPartnerWorkload(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +} +{{- end }} +func TestAccAssuredWorkloadsWorkload_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": 
acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_BasicHandWritten(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + { + Config: testAccAssuredWorkloadsWorkload_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccAssuredWorkloadsWorkload_FullHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_FullHandWritten(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", 
+ ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +{{- if ne $.TargetVersionName "ga" }} +} + +func testAccAssuredWorkloadsWorkload_SovereignControlsWorkload(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "EU_REGIONS_AND_SUPPORT" + display_name = "tf-test-display%{random_suffix}" + location = "europe-west9" + organization = "%{org_id}" + billing_account = "billingAccounts/%{billing_acct}" + enable_sovereign_controls = true + + kms_settings { + next_rotation_time = "9999-10-02T15:01:23Z" + rotation_period = "10368000s" + } + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "tf-test-ring%{random_suffix}" + resource_type = "KEYRING" + } + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +`, context) +} + +func testAccAssuredWorkloadsWorkload_SovereignControlsWorkloadUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "EU_REGIONS_AND_SUPPORT" + display_name = "updated-example" + location = "europe-west9" + organization = "%{org_id}" + billing_account = "billingAccounts/%{billing_acct}" + + labels = { + label-two = "value-two-eu-regions-and-support" + } + provider = google-beta +} + +`, context) +} + +func testAccAssuredWorkloadsWorkload_SplitBillingPartnerWorkload(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "ASSURED_WORKLOADS_FOR_PARTNERS" + display_name = "tf-test-display%{random_suffix}" + location 
= "europe-west8" + organization = "%{org_id}" + billing_account = "billingAccounts/%{billing_acct}" + partner = "SOVEREIGN_CONTROLS_BY_PSN" + + partner_permissions { + assured_workloads_monitoring = true + data_logs_viewer = true + service_access_approver = true + } + + partner_services_billing_account = "billingAccounts/01BF3F-2C6DE5-30C607" + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "tf-test-ring%{random_suffix}" + resource_type = "KEYRING" + } + + violation_notifications_enabled = true + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +`, context) +{{- end }} +} + +func testAccAssuredWorkloadsWorkload_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + labels = { + a = "a" + } + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + provisioned_resources_parent = google_folder.folder1.name + organization = "%{org_id}" + location = "us-central1" + workload_options { + kaj_enrollment_type = "KEY_ACCESS_TRANSPARENCY_OFF" + } + resource_settings { + resource_type = "CONSUMER_FOLDER" + display_name = "folder-display-name" + } + violation_notifications_enabled = true + depends_on = [time_sleep.wait_120_seconds] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + depends_on = [google_folder.folder1] +} + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" + deletion_protection = false +} +`, context) +} + +func testAccAssuredWorkloadsWorkload_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + 
labels = { + a = "b" + } + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + provisioned_resources_parent = google_folder.folder1.name + organization = "%{org_id}" + location = "us-central1" + resource_settings { + resource_type = "CONSUMER_FOLDER" + display_name = "folder-display-name" + } + violation_notifications_enabled = true + depends_on = [time_sleep.wait_120_seconds] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + depends_on = [google_folder.folder1] +} + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" + deletion_protection = false +} +`, context) +} + +func testAccAssuredWorkloadsWorkload_FullHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + organization = "%{org_id}" + location = "us-central1" + kms_settings { + next_rotation_time = "2022-10-02T15:01:23Z" + rotation_period = "864000s" + } + provisioned_resources_parent = google_folder.folder1.name + depends_on = [time_sleep.wait_120_seconds] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + depends_on = [google_folder.folder1] +} + + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" + deletion_protection = false +} + +`, context) +} + +func testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_assured_workloads_workload" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if 
config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &assuredworkloads.Workload{ + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(rs.Primary.Attributes["compliance_regime"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Organization: dcl.String(rs.Primary.Attributes["organization"]), + BillingAccount: dcl.String(rs.Primary.Attributes["billing_account"]), + EnableSovereignControls: dcl.Bool(rs.Primary.Attributes["enable_sovereign_controls"] == "true"), + Partner: assuredworkloads.WorkloadPartnerEnumRef(rs.Primary.Attributes["partner"]), + PartnerServicesBillingAccount: dcl.String(rs.Primary.Attributes["partner_services_billing_account"]), + ProvisionedResourcesParent: dcl.String(rs.Primary.Attributes["provisioned_resources_parent"]), + ViolationNotificationsEnabled: dcl.Bool(rs.Primary.Attributes["violation_notifications_enabled"] == "true"), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + KajEnrollmentState: assuredworkloads.WorkloadKajEnrollmentStateEnumRef(rs.Primary.Attributes["kaj_enrollment_state"]), + Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), + } + + client := assuredworkloads.NewDCLAssuredWorkloadsClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetWorkload(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_assured_workloads_workload still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl index cd17ac3fc915..45d8544b3e16 100644 --- a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl +++ 
b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_assured_workloads_workload' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'assuredworkloads.googleapis.com' {{- if ne $.TargetVersionName "ga" }} api_version: 'v1beta1' diff --git a/mmv1/third_party/terraform/services/assuredworkloads/workload.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/workload.go.tmpl new file mode 100644 index 000000000000..96a1b33b391c --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/workload.go.tmpl @@ -0,0 +1,1121 @@ +package assuredworkloads + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Workload struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Resources []WorkloadResources `json:"resources"` + ComplianceRegime *WorkloadComplianceRegimeEnum `json:"complianceRegime"` + CreateTime *string `json:"createTime"` + BillingAccount *string `json:"billingAccount"` + PartnerServicesBillingAccount *string `json:"partnerServicesBillingAccount"` + Labels map[string]string `json:"labels"` + ProvisionedResourcesParent *string `json:"provisionedResourcesParent"` + KmsSettings *WorkloadKmsSettings `json:"kmsSettings"` + ResourceSettings []WorkloadResourceSettings `json:"resourceSettings"` + KajEnrollmentState *WorkloadKajEnrollmentStateEnum `json:"kajEnrollmentState"` + EnableSovereignControls *bool `json:"enableSovereignControls"` + SaaEnrollmentResponse *WorkloadSaaEnrollmentResponse `json:"saaEnrollmentResponse"` + ComplianceStatus *WorkloadComplianceStatus `json:"complianceStatus"` + CompliantButDisallowedServices []string `json:"compliantButDisallowedServices"` + Partner *WorkloadPartnerEnum `json:"partner"` + PartnerPermissions *WorkloadPartnerPermissions 
`json:"partnerPermissions"` + WorkloadOptions *WorkloadWorkloadOptions `json:"workloadOptions"` + EkmProvisioningResponse *WorkloadEkmProvisioningResponse `json:"ekmProvisioningResponse"` + ViolationNotificationsEnabled *bool `json:"violationNotificationsEnabled"` + Organization *string `json:"organization"` + Location *string `json:"location"` +} + +func (r *Workload) String() string { + return dcl.SprintResource(r) +} + +// The enum WorkloadResourcesResourceTypeEnum. +type WorkloadResourcesResourceTypeEnum string + +// WorkloadResourcesResourceTypeEnumRef returns a *WorkloadResourcesResourceTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadResourcesResourceTypeEnumRef(s string) *WorkloadResourcesResourceTypeEnum { + v := WorkloadResourcesResourceTypeEnum(s) + return &v +} + +func (v WorkloadResourcesResourceTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"RESOURCE_TYPE_UNSPECIFIED", "CONSUMER_PROJECT", "ENCRYPTION_KEYS_PROJECT", "KEYRING", "CONSUMER_FOLDER"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadResourcesResourceTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadComplianceRegimeEnum. +type WorkloadComplianceRegimeEnum string + +// WorkloadComplianceRegimeEnumRef returns a *WorkloadComplianceRegimeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadComplianceRegimeEnumRef(s string) *WorkloadComplianceRegimeEnum { + v := WorkloadComplianceRegimeEnum(s) + return &v +} + +func (v WorkloadComplianceRegimeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"COMPLIANCE_REGIME_UNSPECIFIED", "IL4", "CJIS", "FEDRAMP_HIGH", "FEDRAMP_MODERATE", "US_REGIONAL_ACCESS", "HIPAA", "HITRUST", "EU_REGIONS_AND_SUPPORT", "CA_REGIONS_AND_SUPPORT", "ITAR", "AU_REGIONS_AND_US_SUPPORT", "ASSURED_WORKLOADS_FOR_PARTNERS", "ISR_REGIONS", "ISR_REGIONS_AND_SUPPORT", "CA_PROTECTED_B", "IL5", "IL2", "JP_REGIONS_AND_SUPPORT", "KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS", "REGIONAL_CONTROLS", "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS", "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT", "IRS_1075"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadComplianceRegimeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadResourceSettingsResourceTypeEnum. +type WorkloadResourceSettingsResourceTypeEnum string + +// WorkloadResourceSettingsResourceTypeEnumRef returns a *WorkloadResourceSettingsResourceTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadResourceSettingsResourceTypeEnumRef(s string) *WorkloadResourceSettingsResourceTypeEnum { + v := WorkloadResourceSettingsResourceTypeEnum(s) + return &v +} + +func (v WorkloadResourceSettingsResourceTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"RESOURCE_TYPE_UNSPECIFIED", "CONSUMER_PROJECT", "ENCRYPTION_KEYS_PROJECT", "KEYRING", "CONSUMER_FOLDER"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadResourceSettingsResourceTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadKajEnrollmentStateEnum. +type WorkloadKajEnrollmentStateEnum string + +// WorkloadKajEnrollmentStateEnumRef returns a *WorkloadKajEnrollmentStateEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkloadKajEnrollmentStateEnumRef(s string) *WorkloadKajEnrollmentStateEnum { + v := WorkloadKajEnrollmentStateEnum(s) + return &v +} + +func (v WorkloadKajEnrollmentStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"KAJ_ENROLLMENT_STATE_UNSPECIFIED", "KAJ_ENROLLMENT_STATE_PENDING", "KAJ_ENROLLMENT_STATE_COMPLETE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadKajEnrollmentStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadSaaEnrollmentResponseSetupErrorsEnum. +type WorkloadSaaEnrollmentResponseSetupErrorsEnum string + +// WorkloadSaaEnrollmentResponseSetupErrorsEnumRef returns a *WorkloadSaaEnrollmentResponseSetupErrorsEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadSaaEnrollmentResponseSetupErrorsEnumRef(s string) *WorkloadSaaEnrollmentResponseSetupErrorsEnum { + v := WorkloadSaaEnrollmentResponseSetupErrorsEnum(s) + return &v +} + +func (v WorkloadSaaEnrollmentResponseSetupErrorsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"SETUP_ERROR_UNSPECIFIED", "ERROR_INVALID_BASE_SETUP", "ERROR_MISSING_EXTERNAL_SIGNING_KEY", "ERROR_NOT_ALL_SERVICES_ENROLLED", "ERROR_SETUP_CHECK_FAILED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadSaaEnrollmentResponseSetupErrorsEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadSaaEnrollmentResponseSetupStatusEnum. +type WorkloadSaaEnrollmentResponseSetupStatusEnum string + +// WorkloadSaaEnrollmentResponseSetupStatusEnumRef returns a *WorkloadSaaEnrollmentResponseSetupStatusEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkloadSaaEnrollmentResponseSetupStatusEnumRef(s string) *WorkloadSaaEnrollmentResponseSetupStatusEnum { + v := WorkloadSaaEnrollmentResponseSetupStatusEnum(s) + return &v +} + +func (v WorkloadSaaEnrollmentResponseSetupStatusEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"SETUP_STATE_UNSPECIFIED", "STATUS_PENDING", "STATUS_COMPLETE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadSaaEnrollmentResponseSetupStatusEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadPartnerEnum. +type WorkloadPartnerEnum string + +// WorkloadPartnerEnumRef returns a *WorkloadPartnerEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadPartnerEnumRef(s string) *WorkloadPartnerEnum { + v := WorkloadPartnerEnum(s) + return &v +} + +func (v WorkloadPartnerEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PARTNER_UNSPECIFIED", "LOCAL_CONTROLS_BY_S3NS", "SOVEREIGN_CONTROLS_BY_T_SYSTEMS", "SOVEREIGN_CONTROLS_BY_SIA_MINSAIT", "SOVEREIGN_CONTROLS_BY_PSN", "SOVEREIGN_CONTROLS_BY_CNTXT", "SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadPartnerEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadWorkloadOptionsKajEnrollmentTypeEnum. +type WorkloadWorkloadOptionsKajEnrollmentTypeEnum string + +// WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef returns a *WorkloadWorkloadOptionsKajEnrollmentTypeEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(s string) *WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + v := WorkloadWorkloadOptionsKajEnrollmentTypeEnum(s) + return &v +} + +func (v WorkloadWorkloadOptionsKajEnrollmentTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"KAJ_ENROLLMENT_TYPE_UNSPECIFIED", "FULL_KAJ", "EKM_ONLY", "KEY_ACCESS_TRANSPARENCY_OFF"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadWorkloadOptionsKajEnrollmentTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadEkmProvisioningResponseEkmProvisioningStateEnum. +type WorkloadEkmProvisioningResponseEkmProvisioningStateEnum string + +// WorkloadEkmProvisioningResponseEkmProvisioningStateEnumRef returns a *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadEkmProvisioningResponseEkmProvisioningStateEnumRef(s string) *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + v := WorkloadEkmProvisioningResponseEkmProvisioningStateEnum(s) + return &v +} + +func (v WorkloadEkmProvisioningResponseEkmProvisioningStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"EKM_PROVISIONING_STATE_UNSPECIFIED", "EKM_PROVISIONING_STATE_PENDING", "EKM_PROVISIONING_STATE_FAILED", "EKM_PROVISIONING_STATE_COMPLETED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadEkmProvisioningResponseEkmProvisioningStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum. 
+type WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum string + +// WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumRef returns a *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumRef(s string) *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + v := WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(s) + return &v +} + +func (v WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED", "UNSPECIFIED_ERROR", "GOOGLE_SERVER_ERROR", "EXTERNAL_USER_ERROR", "EXTERNAL_PARTNER_ERROR", "TIMEOUT_ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum. +type WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum string + +// WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumRef returns a *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumRef(s string) *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum { + v := WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(s) + return &v +} + +func (v WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED", "INVALID_SERVICE_ACCOUNT", "MISSING_METRICS_SCOPE_ADMIN_PERMISSION", "MISSING_EKM_CONNECTION_ADMIN_PERMISSION"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum", + Value: string(v), + Valid: []string{}, + } +} + +type WorkloadResources struct { + empty bool `json:"-"` + ResourceId *int64 `json:"resourceId"` + ResourceType *WorkloadResourcesResourceTypeEnum `json:"resourceType"` +} + +type jsonWorkloadResources WorkloadResources + +func (r *WorkloadResources) UnmarshalJSON(data []byte) error { + var res jsonWorkloadResources + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadResources + } else { + + r.ResourceId = res.ResourceId + + r.ResourceType = res.ResourceType + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadResources is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadResources *WorkloadResources = &WorkloadResources{empty: true} + +func (r *WorkloadResources) Empty() bool { + return r.empty +} + +func (r *WorkloadResources) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadResources) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkloadKmsSettings struct { + empty bool `json:"-"` + NextRotationTime *string `json:"nextRotationTime"` + RotationPeriod *string `json:"rotationPeriod"` +} + +type jsonWorkloadKmsSettings WorkloadKmsSettings + +func (r *WorkloadKmsSettings) UnmarshalJSON(data []byte) error { + var res jsonWorkloadKmsSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadKmsSettings + } else { + + r.NextRotationTime = res.NextRotationTime + + r.RotationPeriod = res.RotationPeriod + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadKmsSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadKmsSettings *WorkloadKmsSettings = &WorkloadKmsSettings{empty: true} + +func (r *WorkloadKmsSettings) Empty() bool { + return r.empty +} + +func (r *WorkloadKmsSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadKmsSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkloadResourceSettings struct { + empty bool `json:"-"` + ResourceId *string `json:"resourceId"` + ResourceType *WorkloadResourceSettingsResourceTypeEnum `json:"resourceType"` + DisplayName *string `json:"displayName"` +} + +type jsonWorkloadResourceSettings WorkloadResourceSettings + +func (r *WorkloadResourceSettings) UnmarshalJSON(data []byte) error { + var res jsonWorkloadResourceSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadResourceSettings + } else { + + r.ResourceId = res.ResourceId + + r.ResourceType = res.ResourceType + + r.DisplayName = res.DisplayName + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadResourceSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadResourceSettings *WorkloadResourceSettings = &WorkloadResourceSettings{empty: true} + +func (r *WorkloadResourceSettings) Empty() bool { + return r.empty +} + +func (r *WorkloadResourceSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadResourceSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkloadSaaEnrollmentResponse struct { + empty bool `json:"-"` + SetupErrors []WorkloadSaaEnrollmentResponseSetupErrorsEnum `json:"setupErrors"` + SetupStatus *WorkloadSaaEnrollmentResponseSetupStatusEnum `json:"setupStatus"` +} + +type jsonWorkloadSaaEnrollmentResponse WorkloadSaaEnrollmentResponse + +func (r *WorkloadSaaEnrollmentResponse) UnmarshalJSON(data []byte) error { + var res jsonWorkloadSaaEnrollmentResponse + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadSaaEnrollmentResponse + } else { + + r.SetupErrors = res.SetupErrors + + r.SetupStatus = res.SetupStatus + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadSaaEnrollmentResponse is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadSaaEnrollmentResponse *WorkloadSaaEnrollmentResponse = &WorkloadSaaEnrollmentResponse{empty: true} + +func (r *WorkloadSaaEnrollmentResponse) Empty() bool { + return r.empty +} + +func (r *WorkloadSaaEnrollmentResponse) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadSaaEnrollmentResponse) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkloadComplianceStatus struct { + empty bool `json:"-"` + ActiveViolationCount []int64 `json:"activeViolationCount"` + AcknowledgedViolationCount []int64 `json:"acknowledgedViolationCount"` +} + +type jsonWorkloadComplianceStatus WorkloadComplianceStatus + +func (r *WorkloadComplianceStatus) UnmarshalJSON(data []byte) error { + var res jsonWorkloadComplianceStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadComplianceStatus + } else { + + r.ActiveViolationCount = res.ActiveViolationCount + + r.AcknowledgedViolationCount = res.AcknowledgedViolationCount + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadComplianceStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadComplianceStatus *WorkloadComplianceStatus = &WorkloadComplianceStatus{empty: true} + +func (r *WorkloadComplianceStatus) Empty() bool { + return r.empty +} + +func (r *WorkloadComplianceStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadComplianceStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkloadPartnerPermissions struct { + empty bool `json:"-"` + DataLogsViewer *bool `json:"dataLogsViewer"` + ServiceAccessApprover *bool `json:"serviceAccessApprover"` + AssuredWorkloadsMonitoring *bool `json:"assuredWorkloadsMonitoring"` +} + +type jsonWorkloadPartnerPermissions WorkloadPartnerPermissions + +func (r *WorkloadPartnerPermissions) UnmarshalJSON(data []byte) error { + var res jsonWorkloadPartnerPermissions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadPartnerPermissions + } else { + + r.DataLogsViewer = res.DataLogsViewer + + r.ServiceAccessApprover = res.ServiceAccessApprover + + r.AssuredWorkloadsMonitoring = res.AssuredWorkloadsMonitoring + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadPartnerPermissions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadPartnerPermissions *WorkloadPartnerPermissions = &WorkloadPartnerPermissions{empty: true} + +func (r *WorkloadPartnerPermissions) Empty() bool { + return r.empty +} + +func (r *WorkloadPartnerPermissions) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadPartnerPermissions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkloadWorkloadOptions struct { + empty bool `json:"-"` + KajEnrollmentType *WorkloadWorkloadOptionsKajEnrollmentTypeEnum `json:"kajEnrollmentType"` +} + +type jsonWorkloadWorkloadOptions WorkloadWorkloadOptions + +func (r *WorkloadWorkloadOptions) UnmarshalJSON(data []byte) error { + var res jsonWorkloadWorkloadOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadWorkloadOptions + } else { + + r.KajEnrollmentType = res.KajEnrollmentType + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadWorkloadOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadWorkloadOptions *WorkloadWorkloadOptions = &WorkloadWorkloadOptions{empty: true} + +func (r *WorkloadWorkloadOptions) Empty() bool { + return r.empty +} + +func (r *WorkloadWorkloadOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadWorkloadOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkloadEkmProvisioningResponse struct { + empty bool `json:"-"` + EkmProvisioningState *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum `json:"ekmProvisioningState"` + EkmProvisioningErrorDomain *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum `json:"ekmProvisioningErrorDomain"` + EkmProvisioningErrorMapping *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum `json:"ekmProvisioningErrorMapping"` +} + +type jsonWorkloadEkmProvisioningResponse WorkloadEkmProvisioningResponse + +func (r *WorkloadEkmProvisioningResponse) UnmarshalJSON(data []byte) error { + var res jsonWorkloadEkmProvisioningResponse + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkloadEkmProvisioningResponse + } else { + + r.EkmProvisioningState = res.EkmProvisioningState + + r.EkmProvisioningErrorDomain = res.EkmProvisioningErrorDomain + + r.EkmProvisioningErrorMapping = res.EkmProvisioningErrorMapping + + } + return nil +} + +// This object is used to assert a desired state where this WorkloadEkmProvisioningResponse is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkloadEkmProvisioningResponse *WorkloadEkmProvisioningResponse = &WorkloadEkmProvisioningResponse{empty: true} + +func (r *WorkloadEkmProvisioningResponse) Empty() bool { + return r.empty +} + +func (r *WorkloadEkmProvisioningResponse) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkloadEkmProvisioningResponse) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Workload) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "assured_workloads", + Type: "Workload", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "assuredworkloads", +{{- end }} + } +} + +func (r *Workload) ID() (string, error) { + if err := extractWorkloadFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "resources": dcl.ValueOrEmptyString(nr.Resources), + "compliance_regime": dcl.ValueOrEmptyString(nr.ComplianceRegime), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "billing_account": dcl.ValueOrEmptyString(nr.BillingAccount), + "partner_services_billing_account": dcl.ValueOrEmptyString(nr.PartnerServicesBillingAccount), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "provisioned_resources_parent": dcl.ValueOrEmptyString(nr.ProvisionedResourcesParent), + "kms_settings": dcl.ValueOrEmptyString(nr.KmsSettings), + "resource_settings": dcl.ValueOrEmptyString(nr.ResourceSettings), + "kaj_enrollment_state": dcl.ValueOrEmptyString(nr.KajEnrollmentState), + "enable_sovereign_controls": dcl.ValueOrEmptyString(nr.EnableSovereignControls), + 
"saa_enrollment_response": dcl.ValueOrEmptyString(nr.SaaEnrollmentResponse), + "compliance_status": dcl.ValueOrEmptyString(nr.ComplianceStatus), + "compliant_but_disallowed_services": dcl.ValueOrEmptyString(nr.CompliantButDisallowedServices), + "partner": dcl.ValueOrEmptyString(nr.Partner), + "partner_permissions": dcl.ValueOrEmptyString(nr.PartnerPermissions), + "workload_options": dcl.ValueOrEmptyString(nr.WorkloadOptions), + "ekm_provisioning_response": dcl.ValueOrEmptyString(nr.EkmProvisioningResponse), + "violation_notifications_enabled": dcl.ValueOrEmptyString(nr.ViolationNotificationsEnabled), + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const WorkloadMaxPage = -1 + +type WorkloadList struct { + Items []*Workload + + nextToken string + + pageSize int32 + + resource *Workload +} + +func (l *WorkloadList) HasNext() bool { + return l.nextToken != "" +} + +func (l *WorkloadList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listWorkload(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListWorkload(ctx context.Context, organization, location string) (*WorkloadList, error) { + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 400: dcl.Retryability{ + Retryable: true, + Pattern: "contains projects or other resources that are not deleted", + Timeout: 300000000000, + }, + }))) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return 
c.ListWorkloadWithMaxResults(ctx, organization, location, WorkloadMaxPage)
+
+}
+
+func (c *Client) ListWorkloadWithMaxResults(ctx context.Context, organization, location string, pageSize int32) (*WorkloadList, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// Create a resource object so that we can use proper url normalization methods.
+	r := &Workload{
+		Organization: &organization,
+		Location:     &location,
+	}
+	items, token, err := c.listWorkload(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &WorkloadList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+func (c *Client) GetWorkload(ctx context.Context, r *Workload) (*Workload, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{
+		400: dcl.Retryability{
+			Retryable: true,
+			Pattern:   "contains projects or other resources that are not deleted",
+			Timeout:   300000000000,
+		},
+	})))
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// This is *purposefully* suppressing errors.
+	// This function is used with url-normalized values + not URL normalized values.
+	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
+ extractWorkloadFields(r) + + b, err := c.getWorkloadRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalWorkload(b, c, r) + if err != nil { + return nil, err + } + result.Organization = r.Organization + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeWorkloadNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractWorkloadFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteWorkload(ctx context.Context, r *Workload) error { + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 400: dcl.Retryability{ + Retryable: true, + Pattern: "contains projects or other resources that are not deleted", + Timeout: 300000000000, + }, + }))) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Workload resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Workload...") + deleteOp := deleteWorkloadOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllWorkload deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllWorkload(ctx context.Context, organization, location string, filter func(*Workload) bool) error { + listObj, err := c.ListWorkload(ctx, organization, location) + if err != nil { + return err + } + + err = c.deleteAllWorkload(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllWorkload(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyWorkload(ctx context.Context, rawDesired *Workload, opts ...dcl.ApplyOption) (*Workload, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 400: dcl.Retryability{ + Retryable: true, + Pattern: "contains projects or other resources that are not deleted", + Timeout: 300000000000, + }, + }))) + var resultNewState *Workload + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyWorkloadHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyWorkloadHelper(c *Client, ctx context.Context, rawDesired *Workload, opts ...dcl.ApplyOption) (*Workload, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyWorkload...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractWorkloadFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.workloadDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToWorkloadDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []workloadApiOperation + if create { + ops = append(ops, &createWorkloadOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyWorkloadDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyWorkloadDiff(c *Client, ctx context.Context, desired *Workload, rawDesired *Workload, ops []workloadApiOperation, opts ...dcl.ApplyOption) (*Workload, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetWorkload(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createWorkloadOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapWorkload(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeWorkloadNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeWorkloadNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeWorkloadDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractWorkloadFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractWorkloadFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffWorkload(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/workload_internal.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/workload_internal.go.tmpl new file mode 100644 index 000000000000..bda37fa683ac --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/workload_internal.go.tmpl @@ -0,0 +1,4151 @@ +package assuredworkloads + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Workload) validate() error { + + if err := dcl.Required(r, "displayName"); err != nil { + return err + } + if err := dcl.Required(r, "complianceRegime"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Organization, "Organization"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.KmsSettings) { + if err := r.KmsSettings.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SaaEnrollmentResponse) { + if err := r.SaaEnrollmentResponse.validate(); err != 
nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ComplianceStatus) { + if err := r.ComplianceStatus.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PartnerPermissions) { + if err := r.PartnerPermissions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkloadOptions) { + if err := r.WorkloadOptions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.EkmProvisioningResponse) { + if err := r.EkmProvisioningResponse.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkloadResources) validate() error { + return nil +} +func (r *WorkloadKmsSettings) validate() error { + if err := dcl.Required(r, "nextRotationTime"); err != nil { + return err + } + if err := dcl.Required(r, "rotationPeriod"); err != nil { + return err + } + return nil +} +func (r *WorkloadResourceSettings) validate() error { + return nil +} +func (r *WorkloadSaaEnrollmentResponse) validate() error { + return nil +} +func (r *WorkloadComplianceStatus) validate() error { + return nil +} +func (r *WorkloadPartnerPermissions) validate() error { + return nil +} +func (r *WorkloadWorkloadOptions) validate() error { + return nil +} +func (r *WorkloadEkmProvisioningResponse) validate() error { + return nil +} +func (r *Workload) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } +{{- if ne $.TargetVersionName "ga" }} + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-assuredworkloads.googleapis.com/v1beta1/", params) +{{- else }} + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-assuredworkloads.googleapis.com/v1/", params) +{{- end }} +} + +func (r *Workload) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), 
+ } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Workload) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads", nr.basePath(), userBasePath, params), nil + +} + +func (r *Workload) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads", nr.basePath(), userBasePath, params), nil + +} + +func (r *Workload) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// workloadApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type workloadApiOperation interface { + do(context.Context, *Workload, *Client) error +} + +// newUpdateWorkloadUpdateWorkloadRequest creates a request for an +// Workload resource's UpdateWorkload update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateWorkloadUpdateWorkloadRequest(ctx context.Context, f *Workload, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + b, err := c.getWorkloadRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateWorkloadUpdateWorkloadRequest converts the update into +// the final JSON request body. +func marshalUpdateWorkloadUpdateWorkloadRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateWorkloadUpdateWorkloadOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateWorkloadUpdateWorkloadOperation) do(ctx context.Context, r *Workload, c *Client) error { + _, err := c.GetWorkload(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateWorkload") + if err != nil { + return err + } + mask := dcl.UpdateMaskWithPrefix(op.FieldDiffs, "Workload") + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateWorkloadUpdateWorkloadRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateWorkloadUpdateWorkloadRequest(c, req) + if err != nil { + return err + } + _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + return nil +} + +func (c *Client) listWorkloadRaw(ctx context.Context, r *Workload, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != WorkloadMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listWorkloadOperation struct { + Workloads []map[string]interface{} `json:"workloads"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listWorkload(ctx context.Context, r *Workload, pageToken string, pageSize int32) ([]*Workload, string, error) { + b, err := c.listWorkloadRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listWorkloadOperation + if err := json.Unmarshal(b, &m); 
err != nil { + return nil, "", err + } + + var l []*Workload + for _, v := range m.Workloads { + res, err := unmarshalMapWorkload(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Organization = r.Organization + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllWorkload(ctx context.Context, f func(*Workload) bool, resources []*Workload) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteWorkload(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteWorkloadOperation struct{} + +func (op *deleteWorkloadOperation) do(ctx context.Context, r *Workload, c *Client) error { + r, err := c.GetWorkload(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Workload not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetWorkload checking for existence. error: %v", err) + return err + } + + err = r.deleteResources(ctx, c) + if err != nil { + return err + } + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Workload: %w", err) + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. 
+ retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetWorkload(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createWorkloadOperation struct { + response map[string]interface{} +} + +func (op *createWorkloadOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createWorkloadOperation) do(ctx context.Context, r *Workload, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + if r.Name != nil { + // Allowing creation to continue with Name set could result in a Workload with the wrong Name. + return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + // Include Name in URL substitution for initial GET request. + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + + if _, err := c.GetWorkload(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getWorkloadRaw(ctx context.Context, r *Workload) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) workloadDiffsForRawDesired(ctx context.Context, rawDesired *Workload, opts ...dcl.ApplyOption) (initial, desired *Workload, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Workload + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Workload); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Workload, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + if fetchState.Name == nil { + // We cannot perform a get because of lack of information. 
We have to assume + // that this is being created for the first time. + desired, err := canonicalizeWorkloadDesiredState(rawDesired, nil) + return nil, desired, nil, err + } + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetWorkload(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Workload resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Workload resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Workload resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeWorkloadDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Workload: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Workload: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractWorkloadFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeWorkloadInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Workload: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeWorkloadDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Workload: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffWorkload(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeWorkloadInitialState(rawInitial, rawDesired *Workload) (*Workload, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeWorkloadDesiredState(rawDesired, rawInitial *Workload, opts ...dcl.ApplyOption) (*Workload, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.KmsSettings = canonicalizeWorkloadKmsSettings(rawDesired.KmsSettings, nil, opts...) + rawDesired.SaaEnrollmentResponse = canonicalizeWorkloadSaaEnrollmentResponse(rawDesired.SaaEnrollmentResponse, nil, opts...) + rawDesired.ComplianceStatus = canonicalizeWorkloadComplianceStatus(rawDesired.ComplianceStatus, nil, opts...) + rawDesired.PartnerPermissions = canonicalizeWorkloadPartnerPermissions(rawDesired.PartnerPermissions, nil, opts...) + rawDesired.WorkloadOptions = canonicalizeWorkloadWorkloadOptions(rawDesired.WorkloadOptions, nil, opts...) + rawDesired.EkmProvisioningResponse = canonicalizeWorkloadEkmProvisioningResponse(rawDesired.EkmProvisioningResponse, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Workload{} + if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.ComplianceRegime) || (dcl.IsEmptyValueIndirect(rawDesired.ComplianceRegime) && dcl.IsEmptyValueIndirect(rawInitial.ComplianceRegime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.ComplianceRegime = rawInitial.ComplianceRegime + } else { + canonicalDesired.ComplianceRegime = rawDesired.ComplianceRegime + } + if dcl.StringCanonicalize(rawDesired.BillingAccount, rawInitial.BillingAccount) { + canonicalDesired.BillingAccount = rawInitial.BillingAccount + } else { + canonicalDesired.BillingAccount = rawDesired.BillingAccount + } + if dcl.StringCanonicalize(rawDesired.PartnerServicesBillingAccount, rawInitial.PartnerServicesBillingAccount) { + canonicalDesired.PartnerServicesBillingAccount = rawInitial.PartnerServicesBillingAccount + } else { + canonicalDesired.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.StringCanonicalize(rawDesired.ProvisionedResourcesParent, rawInitial.ProvisionedResourcesParent) { + canonicalDesired.ProvisionedResourcesParent = rawInitial.ProvisionedResourcesParent + } else { + canonicalDesired.ProvisionedResourcesParent = rawDesired.ProvisionedResourcesParent + } + canonicalDesired.KmsSettings = canonicalizeWorkloadKmsSettings(rawDesired.KmsSettings, rawInitial.KmsSettings, opts...) + canonicalDesired.ResourceSettings = canonicalizeWorkloadResourceSettingsSlice(rawDesired.ResourceSettings, rawInitial.ResourceSettings, opts...) + if dcl.BoolCanonicalize(rawDesired.EnableSovereignControls, rawInitial.EnableSovereignControls) { + canonicalDesired.EnableSovereignControls = rawInitial.EnableSovereignControls + } else { + canonicalDesired.EnableSovereignControls = rawDesired.EnableSovereignControls + } + if dcl.IsZeroValue(rawDesired.Partner) || (dcl.IsEmptyValueIndirect(rawDesired.Partner) && dcl.IsEmptyValueIndirect(rawInitial.Partner)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Partner = rawInitial.Partner + } else { + canonicalDesired.Partner = rawDesired.Partner + } + canonicalDesired.PartnerPermissions = canonicalizeWorkloadPartnerPermissions(rawDesired.PartnerPermissions, rawInitial.PartnerPermissions, opts...) + canonicalDesired.WorkloadOptions = canonicalizeWorkloadWorkloadOptions(rawDesired.WorkloadOptions, rawInitial.WorkloadOptions, opts...) 
+ if dcl.BoolCanonicalize(rawDesired.ViolationNotificationsEnabled, rawInitial.ViolationNotificationsEnabled) { + canonicalDesired.ViolationNotificationsEnabled = rawInitial.ViolationNotificationsEnabled + } else { + canonicalDesired.ViolationNotificationsEnabled = rawDesired.ViolationNotificationsEnabled + } + if dcl.NameToSelfLink(rawDesired.Organization, rawInitial.Organization) { + canonicalDesired.Organization = rawInitial.Organization + } else { + canonicalDesired.Organization = rawDesired.Organization + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeWorkloadNewState(c *Client, rawNew, rawDesired *Workload) (*Workload, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Resources) && dcl.IsEmptyValueIndirect(rawDesired.Resources) { + rawNew.Resources = rawDesired.Resources + } else { + rawNew.Resources = canonicalizeNewWorkloadResourcesSlice(c, rawDesired.Resources, rawNew.Resources) + } + + if dcl.IsEmptyValueIndirect(rawNew.ComplianceRegime) && dcl.IsEmptyValueIndirect(rawDesired.ComplianceRegime) { + rawNew.ComplianceRegime = rawDesired.ComplianceRegime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.BillingAccount) && dcl.IsEmptyValueIndirect(rawDesired.BillingAccount) { + 
rawNew.BillingAccount = rawDesired.BillingAccount + } else { + rawNew.BillingAccount = rawDesired.BillingAccount + } + + if dcl.IsEmptyValueIndirect(rawNew.PartnerServicesBillingAccount) && dcl.IsEmptyValueIndirect(rawDesired.PartnerServicesBillingAccount) { + rawNew.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } else { + rawNew.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + rawNew.ProvisionedResourcesParent = rawDesired.ProvisionedResourcesParent + + rawNew.KmsSettings = rawDesired.KmsSettings + + rawNew.ResourceSettings = rawDesired.ResourceSettings + + if dcl.IsEmptyValueIndirect(rawNew.KajEnrollmentState) && dcl.IsEmptyValueIndirect(rawDesired.KajEnrollmentState) { + rawNew.KajEnrollmentState = rawDesired.KajEnrollmentState + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.EnableSovereignControls) && dcl.IsEmptyValueIndirect(rawDesired.EnableSovereignControls) { + rawNew.EnableSovereignControls = rawDesired.EnableSovereignControls + } else { + if dcl.BoolCanonicalize(rawDesired.EnableSovereignControls, rawNew.EnableSovereignControls) { + rawNew.EnableSovereignControls = rawDesired.EnableSovereignControls + } + } + + if dcl.IsEmptyValueIndirect(rawNew.SaaEnrollmentResponse) && dcl.IsEmptyValueIndirect(rawDesired.SaaEnrollmentResponse) { + rawNew.SaaEnrollmentResponse = rawDesired.SaaEnrollmentResponse + } else { + rawNew.SaaEnrollmentResponse = canonicalizeNewWorkloadSaaEnrollmentResponse(c, rawDesired.SaaEnrollmentResponse, rawNew.SaaEnrollmentResponse) + } + + if dcl.IsEmptyValueIndirect(rawNew.ComplianceStatus) && dcl.IsEmptyValueIndirect(rawDesired.ComplianceStatus) { + rawNew.ComplianceStatus = rawDesired.ComplianceStatus + } else { + rawNew.ComplianceStatus = canonicalizeNewWorkloadComplianceStatus(c, rawDesired.ComplianceStatus, 
rawNew.ComplianceStatus) + } + + if dcl.IsEmptyValueIndirect(rawNew.CompliantButDisallowedServices) && dcl.IsEmptyValueIndirect(rawDesired.CompliantButDisallowedServices) { + rawNew.CompliantButDisallowedServices = rawDesired.CompliantButDisallowedServices + } else { + if dcl.StringArrayCanonicalize(rawDesired.CompliantButDisallowedServices, rawNew.CompliantButDisallowedServices) { + rawNew.CompliantButDisallowedServices = rawDesired.CompliantButDisallowedServices + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Partner) && dcl.IsEmptyValueIndirect(rawDesired.Partner) { + rawNew.Partner = rawDesired.Partner + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.PartnerPermissions) && dcl.IsEmptyValueIndirect(rawDesired.PartnerPermissions) { + rawNew.PartnerPermissions = rawDesired.PartnerPermissions + } else { + rawNew.PartnerPermissions = canonicalizeNewWorkloadPartnerPermissions(c, rawDesired.PartnerPermissions, rawNew.PartnerPermissions) + } + + rawNew.WorkloadOptions = rawDesired.WorkloadOptions + + if dcl.IsEmptyValueIndirect(rawNew.EkmProvisioningResponse) && dcl.IsEmptyValueIndirect(rawDesired.EkmProvisioningResponse) { + rawNew.EkmProvisioningResponse = rawDesired.EkmProvisioningResponse + } else { + rawNew.EkmProvisioningResponse = canonicalizeNewWorkloadEkmProvisioningResponse(c, rawDesired.EkmProvisioningResponse, rawNew.EkmProvisioningResponse) + } + + if dcl.IsEmptyValueIndirect(rawNew.ViolationNotificationsEnabled) && dcl.IsEmptyValueIndirect(rawDesired.ViolationNotificationsEnabled) { + rawNew.ViolationNotificationsEnabled = rawDesired.ViolationNotificationsEnabled + } else { + if dcl.BoolCanonicalize(rawDesired.ViolationNotificationsEnabled, rawNew.ViolationNotificationsEnabled) { + rawNew.ViolationNotificationsEnabled = rawDesired.ViolationNotificationsEnabled + } + } + + rawNew.Organization = rawDesired.Organization + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeWorkloadResources(des, initial 
*WorkloadResources, opts ...dcl.ApplyOption) *WorkloadResources { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadResources{} + + if dcl.IsZeroValue(des.ResourceId) || (dcl.IsEmptyValueIndirect(des.ResourceId) && dcl.IsEmptyValueIndirect(initial.ResourceId)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ResourceId = initial.ResourceId + } else { + cDes.ResourceId = des.ResourceId + } + if dcl.IsZeroValue(des.ResourceType) || (dcl.IsEmptyValueIndirect(des.ResourceType) && dcl.IsEmptyValueIndirect(initial.ResourceType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ResourceType = initial.ResourceType + } else { + cDes.ResourceType = des.ResourceType + } + + return cDes +} + +func canonicalizeWorkloadResourcesSlice(des, initial []WorkloadResources, opts ...dcl.ApplyOption) []WorkloadResources { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadResources, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadResources(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadResources, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadResources(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadResources(c *Client, des, nw *WorkloadResources) *WorkloadResources { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadResources while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkloadResourcesSet(c *Client, des, nw []WorkloadResources) []WorkloadResources { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkloadResources + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadResources(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadResourcesSlice(c *Client, des, nw []WorkloadResources) []WorkloadResources { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadResources + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadResources(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadKmsSettings(des, initial *WorkloadKmsSettings, opts ...dcl.ApplyOption) *WorkloadKmsSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadKmsSettings{} + + if dcl.IsZeroValue(des.NextRotationTime) || (dcl.IsEmptyValueIndirect(des.NextRotationTime) && dcl.IsEmptyValueIndirect(initial.NextRotationTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NextRotationTime = initial.NextRotationTime + } else { + cDes.NextRotationTime = des.NextRotationTime + } + if dcl.StringCanonicalize(des.RotationPeriod, initial.RotationPeriod) || dcl.IsZeroValue(des.RotationPeriod) { + cDes.RotationPeriod = initial.RotationPeriod + } else { + cDes.RotationPeriod = des.RotationPeriod + } + + return cDes +} + +func canonicalizeWorkloadKmsSettingsSlice(des, initial []WorkloadKmsSettings, opts ...dcl.ApplyOption) []WorkloadKmsSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadKmsSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadKmsSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadKmsSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadKmsSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadKmsSettings(c *Client, des, nw *WorkloadKmsSettings) *WorkloadKmsSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadKmsSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.RotationPeriod, nw.RotationPeriod) { + nw.RotationPeriod = des.RotationPeriod + } + + return nw +} + +func canonicalizeNewWorkloadKmsSettingsSet(c *Client, des, nw []WorkloadKmsSettings) []WorkloadKmsSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkloadKmsSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadKmsSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadKmsSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadKmsSettingsSlice(c *Client, des, nw []WorkloadKmsSettings) []WorkloadKmsSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadKmsSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadKmsSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadResourceSettings(des, initial *WorkloadResourceSettings, opts ...dcl.ApplyOption) *WorkloadResourceSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadResourceSettings{} + + if dcl.StringCanonicalize(des.ResourceId, initial.ResourceId) || dcl.IsZeroValue(des.ResourceId) { + cDes.ResourceId = initial.ResourceId + } else { + cDes.ResourceId = des.ResourceId + } + if dcl.IsZeroValue(des.ResourceType) || (dcl.IsEmptyValueIndirect(des.ResourceType) && dcl.IsEmptyValueIndirect(initial.ResourceType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.ResourceType = initial.ResourceType + } else { + cDes.ResourceType = des.ResourceType + } + if dcl.StringCanonicalize(des.DisplayName, initial.DisplayName) || dcl.IsZeroValue(des.DisplayName) { + cDes.DisplayName = initial.DisplayName + } else { + cDes.DisplayName = des.DisplayName + } + + return cDes +} + +func canonicalizeWorkloadResourceSettingsSlice(des, initial []WorkloadResourceSettings, opts ...dcl.ApplyOption) []WorkloadResourceSettings { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadResourceSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadResourceSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadResourceSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadResourceSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadResourceSettings(c *Client, des, nw *WorkloadResourceSettings) *WorkloadResourceSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadResourceSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ResourceId, nw.ResourceId) { + nw.ResourceId = des.ResourceId + } + if dcl.StringCanonicalize(des.DisplayName, nw.DisplayName) { + nw.DisplayName = des.DisplayName + } + + return nw +} + +func canonicalizeNewWorkloadResourceSettingsSet(c *Client, des, nw []WorkloadResourceSettings) []WorkloadResourceSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkloadResourceSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadResourceSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadResourceSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadResourceSettingsSlice(c *Client, des, nw []WorkloadResourceSettings) []WorkloadResourceSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadResourceSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadResourceSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadSaaEnrollmentResponse(des, initial *WorkloadSaaEnrollmentResponse, opts ...dcl.ApplyOption) *WorkloadSaaEnrollmentResponse { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadSaaEnrollmentResponse{} + + if dcl.IsZeroValue(des.SetupErrors) || (dcl.IsEmptyValueIndirect(des.SetupErrors) && dcl.IsEmptyValueIndirect(initial.SetupErrors)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SetupErrors = initial.SetupErrors + } else { + cDes.SetupErrors = des.SetupErrors + } + if dcl.IsZeroValue(des.SetupStatus) || (dcl.IsEmptyValueIndirect(des.SetupStatus) && dcl.IsEmptyValueIndirect(initial.SetupStatus)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.SetupStatus = initial.SetupStatus + } else { + cDes.SetupStatus = des.SetupStatus + } + + return cDes +} + +func canonicalizeWorkloadSaaEnrollmentResponseSlice(des, initial []WorkloadSaaEnrollmentResponse, opts ...dcl.ApplyOption) []WorkloadSaaEnrollmentResponse { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadSaaEnrollmentResponse, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadSaaEnrollmentResponse(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadSaaEnrollmentResponse, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadSaaEnrollmentResponse(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadSaaEnrollmentResponse(c *Client, des, nw *WorkloadSaaEnrollmentResponse) *WorkloadSaaEnrollmentResponse { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadSaaEnrollmentResponse while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkloadSaaEnrollmentResponseSet(c *Client, des, nw []WorkloadSaaEnrollmentResponse) []WorkloadSaaEnrollmentResponse { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkloadSaaEnrollmentResponse + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadSaaEnrollmentResponseNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadSaaEnrollmentResponse(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadSaaEnrollmentResponseSlice(c *Client, des, nw []WorkloadSaaEnrollmentResponse) []WorkloadSaaEnrollmentResponse { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadSaaEnrollmentResponse + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadSaaEnrollmentResponse(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadComplianceStatus(des, initial *WorkloadComplianceStatus, opts ...dcl.ApplyOption) *WorkloadComplianceStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadComplianceStatus{} + + if dcl.IsZeroValue(des.ActiveViolationCount) || (dcl.IsEmptyValueIndirect(des.ActiveViolationCount) && dcl.IsEmptyValueIndirect(initial.ActiveViolationCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ActiveViolationCount = initial.ActiveViolationCount + } else { + cDes.ActiveViolationCount = des.ActiveViolationCount + } + if dcl.IsZeroValue(des.AcknowledgedViolationCount) || (dcl.IsEmptyValueIndirect(des.AcknowledgedViolationCount) && dcl.IsEmptyValueIndirect(initial.AcknowledgedViolationCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcknowledgedViolationCount = initial.AcknowledgedViolationCount + } else { + cDes.AcknowledgedViolationCount = des.AcknowledgedViolationCount + } + + return cDes +} + +func canonicalizeWorkloadComplianceStatusSlice(des, initial []WorkloadComplianceStatus, opts ...dcl.ApplyOption) []WorkloadComplianceStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadComplianceStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadComplianceStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadComplianceStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadComplianceStatus(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadComplianceStatus(c *Client, des, nw *WorkloadComplianceStatus) *WorkloadComplianceStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadComplianceStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkloadComplianceStatusSet(c *Client, des, nw []WorkloadComplianceStatus) []WorkloadComplianceStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkloadComplianceStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadComplianceStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadComplianceStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadComplianceStatusSlice(c *Client, des, nw []WorkloadComplianceStatus) []WorkloadComplianceStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadComplianceStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadComplianceStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadPartnerPermissions(des, initial *WorkloadPartnerPermissions, opts ...dcl.ApplyOption) *WorkloadPartnerPermissions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadPartnerPermissions{} + + if dcl.BoolCanonicalize(des.DataLogsViewer, initial.DataLogsViewer) || dcl.IsZeroValue(des.DataLogsViewer) { + cDes.DataLogsViewer = initial.DataLogsViewer + } else { + cDes.DataLogsViewer = des.DataLogsViewer + } + if dcl.BoolCanonicalize(des.ServiceAccessApprover, initial.ServiceAccessApprover) || dcl.IsZeroValue(des.ServiceAccessApprover) { + cDes.ServiceAccessApprover = initial.ServiceAccessApprover + } else { + cDes.ServiceAccessApprover = des.ServiceAccessApprover + } + if dcl.BoolCanonicalize(des.AssuredWorkloadsMonitoring, initial.AssuredWorkloadsMonitoring) || dcl.IsZeroValue(des.AssuredWorkloadsMonitoring) { + cDes.AssuredWorkloadsMonitoring = initial.AssuredWorkloadsMonitoring + } else { + cDes.AssuredWorkloadsMonitoring = des.AssuredWorkloadsMonitoring + } + + return cDes +} + +func canonicalizeWorkloadPartnerPermissionsSlice(des, initial []WorkloadPartnerPermissions, opts ...dcl.ApplyOption) []WorkloadPartnerPermissions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := 
make([]WorkloadPartnerPermissions, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadPartnerPermissions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadPartnerPermissions, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadPartnerPermissions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadPartnerPermissions(c *Client, des, nw *WorkloadPartnerPermissions) *WorkloadPartnerPermissions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadPartnerPermissions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.DataLogsViewer, nw.DataLogsViewer) { + nw.DataLogsViewer = des.DataLogsViewer + } + if dcl.BoolCanonicalize(des.ServiceAccessApprover, nw.ServiceAccessApprover) { + nw.ServiceAccessApprover = des.ServiceAccessApprover + } + if dcl.BoolCanonicalize(des.AssuredWorkloadsMonitoring, nw.AssuredWorkloadsMonitoring) { + nw.AssuredWorkloadsMonitoring = des.AssuredWorkloadsMonitoring + } + + return nw +} + +func canonicalizeNewWorkloadPartnerPermissionsSet(c *Client, des, nw []WorkloadPartnerPermissions) []WorkloadPartnerPermissions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkloadPartnerPermissions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadPartnerPermissionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadPartnerPermissions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadPartnerPermissionsSlice(c *Client, des, nw []WorkloadPartnerPermissions) []WorkloadPartnerPermissions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadPartnerPermissions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadPartnerPermissions(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadWorkloadOptions(des, initial *WorkloadWorkloadOptions, opts ...dcl.ApplyOption) *WorkloadWorkloadOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadWorkloadOptions{} + + if dcl.IsZeroValue(des.KajEnrollmentType) || (dcl.IsEmptyValueIndirect(des.KajEnrollmentType) && dcl.IsEmptyValueIndirect(initial.KajEnrollmentType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.KajEnrollmentType = initial.KajEnrollmentType + } else { + cDes.KajEnrollmentType = des.KajEnrollmentType + } + + return cDes +} + +func canonicalizeWorkloadWorkloadOptionsSlice(des, initial []WorkloadWorkloadOptions, opts ...dcl.ApplyOption) []WorkloadWorkloadOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadWorkloadOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadWorkloadOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadWorkloadOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadWorkloadOptions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadWorkloadOptions(c *Client, des, nw *WorkloadWorkloadOptions) *WorkloadWorkloadOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadWorkloadOptions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkloadWorkloadOptionsSet(c *Client, des, nw []WorkloadWorkloadOptions) []WorkloadWorkloadOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkloadWorkloadOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadWorkloadOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadWorkloadOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadWorkloadOptionsSlice(c *Client, des, nw []WorkloadWorkloadOptions) []WorkloadWorkloadOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkloadWorkloadOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadWorkloadOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadEkmProvisioningResponse(des, initial *WorkloadEkmProvisioningResponse, opts ...dcl.ApplyOption) *WorkloadEkmProvisioningResponse { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadEkmProvisioningResponse{} + + if dcl.IsZeroValue(des.EkmProvisioningState) || (dcl.IsEmptyValueIndirect(des.EkmProvisioningState) && dcl.IsEmptyValueIndirect(initial.EkmProvisioningState)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.EkmProvisioningState = initial.EkmProvisioningState + } else { + cDes.EkmProvisioningState = des.EkmProvisioningState + } + if dcl.IsZeroValue(des.EkmProvisioningErrorDomain) || (dcl.IsEmptyValueIndirect(des.EkmProvisioningErrorDomain) && dcl.IsEmptyValueIndirect(initial.EkmProvisioningErrorDomain)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.EkmProvisioningErrorDomain = initial.EkmProvisioningErrorDomain + } else { + cDes.EkmProvisioningErrorDomain = des.EkmProvisioningErrorDomain + } + if dcl.IsZeroValue(des.EkmProvisioningErrorMapping) || (dcl.IsEmptyValueIndirect(des.EkmProvisioningErrorMapping) && dcl.IsEmptyValueIndirect(initial.EkmProvisioningErrorMapping)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.EkmProvisioningErrorMapping = initial.EkmProvisioningErrorMapping + } else { + cDes.EkmProvisioningErrorMapping = des.EkmProvisioningErrorMapping + } + + return cDes +} + +func canonicalizeWorkloadEkmProvisioningResponseSlice(des, initial []WorkloadEkmProvisioningResponse, opts ...dcl.ApplyOption) []WorkloadEkmProvisioningResponse { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadEkmProvisioningResponse, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadEkmProvisioningResponse(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadEkmProvisioningResponse, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadEkmProvisioningResponse(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadEkmProvisioningResponse(c *Client, des, nw *WorkloadEkmProvisioningResponse) *WorkloadEkmProvisioningResponse { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadEkmProvisioningResponse while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkloadEkmProvisioningResponseSet(c *Client, des, nw []WorkloadEkmProvisioningResponse) []WorkloadEkmProvisioningResponse { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkloadEkmProvisioningResponse + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadEkmProvisioningResponseNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadEkmProvisioningResponse(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadEkmProvisioningResponseSlice(c *Client, des, nw []WorkloadEkmProvisioningResponse) []WorkloadEkmProvisioningResponse { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadEkmProvisioningResponse + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadEkmProvisioningResponse(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffWorkload(c *Client, desired, actual *Workload, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkloadUpdateWorkloadOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Resources, actual.Resources, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadResourcesNewStyle, EmptyObject: EmptyWorkloadResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Resources")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ComplianceRegime, actual.ComplianceRegime, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComplianceRegime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.BillingAccount, actual.BillingAccount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BillingAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.PartnerServicesBillingAccount, actual.PartnerServicesBillingAccount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PartnerServicesBillingAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkloadUpdateWorkloadOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProvisionedResourcesParent, actual.ProvisionedResourcesParent, dcl.DiffInfo{Ignore: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProvisionedResourcesParent")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.KmsSettings, actual.KmsSettings, dcl.DiffInfo{Ignore: true, ObjectFunction: compareWorkloadKmsSettingsNewStyle, EmptyObject: EmptyWorkloadKmsSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ResourceSettings, actual.ResourceSettings, dcl.DiffInfo{Ignore: true, ObjectFunction: compareWorkloadResourceSettingsNewStyle, EmptyObject: EmptyWorkloadResourceSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.KajEnrollmentState, actual.KajEnrollmentState, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KajEnrollmentState")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableSovereignControls, actual.EnableSovereignControls, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableSovereignControls")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SaaEnrollmentResponse, actual.SaaEnrollmentResponse, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadSaaEnrollmentResponseNewStyle, EmptyObject: EmptyWorkloadSaaEnrollmentResponse, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SaaEnrollmentResponse")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ComplianceStatus, actual.ComplianceStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadComplianceStatusNewStyle, EmptyObject: EmptyWorkloadComplianceStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComplianceStatus")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CompliantButDisallowedServices, actual.CompliantButDisallowedServices, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CompliantButDisallowedServices")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Partner, actual.Partner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Partner")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.PartnerPermissions, actual.PartnerPermissions, dcl.DiffInfo{ObjectFunction: compareWorkloadPartnerPermissionsNewStyle, EmptyObject: EmptyWorkloadPartnerPermissions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PartnerPermissions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadOptions, actual.WorkloadOptions, dcl.DiffInfo{Ignore: true, ObjectFunction: compareWorkloadWorkloadOptionsNewStyle, EmptyObject: EmptyWorkloadWorkloadOptions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.EkmProvisioningResponse, actual.EkmProvisioningResponse, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadEkmProvisioningResponseNewStyle, EmptyObject: EmptyWorkloadEkmProvisioningResponse, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningResponse")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ViolationNotificationsEnabled, actual.ViolationNotificationsEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ViolationNotificationsEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Organization, actual.Organization, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Organization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareWorkloadResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadResources) + if !ok { + desiredNotPointer, ok := d.(WorkloadResources) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadResources or *WorkloadResources", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadResources) + if !ok { + actualNotPointer, ok := a.(WorkloadResources) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadResources", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ResourceId, actual.ResourceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ResourceType, actual.ResourceType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkloadKmsSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadKmsSettings) + if !ok { + desiredNotPointer, ok := d.(WorkloadKmsSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadKmsSettings or *WorkloadKmsSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadKmsSettings) + if !ok { + actualNotPointer, ok := a.(WorkloadKmsSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadKmsSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NextRotationTime, actual.NextRotationTime, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NextRotationTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RotationPeriod, actual.RotationPeriod, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RotationPeriod")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkloadResourceSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadResourceSettings) + if !ok { + desiredNotPointer, ok := d.(WorkloadResourceSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadResourceSettings or *WorkloadResourceSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadResourceSettings) + if !ok { + actualNotPointer, ok := a.(WorkloadResourceSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadResourceSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ResourceId, actual.ResourceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ResourceType, actual.ResourceType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkloadSaaEnrollmentResponseNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadSaaEnrollmentResponse) + if !ok { + desiredNotPointer, ok := d.(WorkloadSaaEnrollmentResponse) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadSaaEnrollmentResponse or *WorkloadSaaEnrollmentResponse", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadSaaEnrollmentResponse) + if !ok { + actualNotPointer, ok := a.(WorkloadSaaEnrollmentResponse) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadSaaEnrollmentResponse", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SetupErrors, actual.SetupErrors, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SetupErrors")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SetupStatus, actual.SetupStatus, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SetupStatus")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkloadComplianceStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadComplianceStatus) + if !ok { + desiredNotPointer, ok := d.(WorkloadComplianceStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadComplianceStatus or *WorkloadComplianceStatus", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadComplianceStatus) + if !ok { + actualNotPointer, ok := a.(WorkloadComplianceStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadComplianceStatus", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ActiveViolationCount, actual.ActiveViolationCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ActiveViolationCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AcknowledgedViolationCount, actual.AcknowledgedViolationCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcknowledgedViolationCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkloadPartnerPermissionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadPartnerPermissions) + if !ok { + desiredNotPointer, ok := d.(WorkloadPartnerPermissions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadPartnerPermissions or *WorkloadPartnerPermissions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadPartnerPermissions) + if !ok { + actualNotPointer, ok := a.(WorkloadPartnerPermissions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadPartnerPermissions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DataLogsViewer, actual.DataLogsViewer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataLogsViewer")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccessApprover, actual.ServiceAccessApprover, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccessApprover")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AssuredWorkloadsMonitoring, actual.AssuredWorkloadsMonitoring, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AssuredWorkloadsMonitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkloadWorkloadOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadWorkloadOptions) + if !ok { + desiredNotPointer, ok := d.(WorkloadWorkloadOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadWorkloadOptions or *WorkloadWorkloadOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadWorkloadOptions) + if !ok { + actualNotPointer, ok := a.(WorkloadWorkloadOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadWorkloadOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KajEnrollmentType, actual.KajEnrollmentType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KajEnrollmentType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkloadEkmProvisioningResponseNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkloadEkmProvisioningResponse) + if !ok { + desiredNotPointer, ok := d.(WorkloadEkmProvisioningResponse) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadEkmProvisioningResponse or *WorkloadEkmProvisioningResponse", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkloadEkmProvisioningResponse) + if !ok { + actualNotPointer, ok := a.(WorkloadEkmProvisioningResponse) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkloadEkmProvisioningResponse", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EkmProvisioningState, actual.EkmProvisioningState, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningState")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.EkmProvisioningErrorDomain, actual.EkmProvisioningErrorDomain, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningErrorDomain")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EkmProvisioningErrorMapping, actual.EkmProvisioningErrorMapping, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningErrorMapping")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Workload) urlNormalized() *Workload { + normalized := dcl.Copy(*r).(Workload) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.BillingAccount = dcl.SelfLinkToName(r.BillingAccount) + normalized.PartnerServicesBillingAccount = dcl.SelfLinkToName(r.PartnerServicesBillingAccount) + normalized.ProvisionedResourcesParent = dcl.SelfLinkToName(r.ProvisionedResourcesParent) + normalized.Organization = dcl.SelfLinkToName(r.Organization) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Workload) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateWorkload" { + fields := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + 
return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Workload resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Workload) marshal(c *Client) ([]byte, error) { + m, err := expandWorkload(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Workload: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalWorkload decodes JSON responses into the Workload resource schema. +func unmarshalWorkload(b []byte, c *Client, res *Workload) (*Workload, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapWorkload(m, c, res) +} + +func unmarshalMapWorkload(m map[string]interface{}, c *Client, res *Workload) (*Workload, error) { + + flattened := flattenWorkload(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandWorkload expands Workload into a JSON request object. 
+func expandWorkload(c *Client, f *Workload) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("organizations/%s/locations/%s/workloads/%s", f.Name, dcl.SelfLinkToName(f.Organization), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.ComplianceRegime; dcl.ValueShouldBeSent(v) { + m["complianceRegime"] = v + } + if v := f.BillingAccount; dcl.ValueShouldBeSent(v) { + m["billingAccount"] = v + } + if v := f.PartnerServicesBillingAccount; dcl.ValueShouldBeSent(v) { + m["partnerServicesBillingAccount"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.ProvisionedResourcesParent; dcl.ValueShouldBeSent(v) { + m["provisionedResourcesParent"] = v + } + if v, err := expandWorkloadKmsSettings(c, f.KmsSettings, res); err != nil { + return nil, fmt.Errorf("error expanding KmsSettings into kmsSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kmsSettings"] = v + } + if v, err := expandWorkloadResourceSettingsSlice(c, f.ResourceSettings, res); err != nil { + return nil, fmt.Errorf("error expanding ResourceSettings into resourceSettings: %w", err) + } else if v != nil { + m["resourceSettings"] = v + } + if v := f.EnableSovereignControls; dcl.ValueShouldBeSent(v) { + m["enableSovereignControls"] = v + } + if v := f.Partner; dcl.ValueShouldBeSent(v) { + m["partner"] = v + } + if v, err := expandWorkloadPartnerPermissions(c, f.PartnerPermissions, res); err != nil { + return nil, fmt.Errorf("error expanding PartnerPermissions into partnerPermissions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["partnerPermissions"] = v + } + if v, err := expandWorkloadWorkloadOptions(c, f.WorkloadOptions, res); err != 
nil { + return nil, fmt.Errorf("error expanding WorkloadOptions into workloadOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workloadOptions"] = v + } + if v := f.ViolationNotificationsEnabled; dcl.ValueShouldBeSent(v) { + m["violationNotificationsEnabled"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Organization into organization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["organization"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenWorkload flattens Workload from a JSON request object into the +// Workload type. +func flattenWorkload(c *Client, i interface{}, res *Workload) *Workload { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Workload{} + resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Resources = flattenWorkloadResourcesSlice(c, m["resources"], res) + resultRes.ComplianceRegime = flattenWorkloadComplianceRegimeEnum(m["complianceRegime"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.BillingAccount = dcl.FlattenString(m["billingAccount"]) + resultRes.PartnerServicesBillingAccount = dcl.FlattenString(m["partnerServicesBillingAccount"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.ProvisionedResourcesParent = dcl.FlattenSecretValue(m["provisionedResourcesParent"]) + resultRes.KmsSettings = flattenWorkloadKmsSettings(c, m["kmsSettings"], res) + resultRes.ResourceSettings = flattenWorkloadResourceSettingsSlice(c, m["resourceSettings"], res) + resultRes.KajEnrollmentState = flattenWorkloadKajEnrollmentStateEnum(m["kajEnrollmentState"]) + resultRes.EnableSovereignControls = 
dcl.FlattenBool(m["enableSovereignControls"]) + resultRes.SaaEnrollmentResponse = flattenWorkloadSaaEnrollmentResponse(c, m["saaEnrollmentResponse"], res) + resultRes.ComplianceStatus = flattenWorkloadComplianceStatus(c, m["complianceStatus"], res) + resultRes.CompliantButDisallowedServices = dcl.FlattenStringSlice(m["compliantButDisallowedServices"]) + resultRes.Partner = flattenWorkloadPartnerEnum(m["partner"]) + resultRes.PartnerPermissions = flattenWorkloadPartnerPermissions(c, m["partnerPermissions"], res) + resultRes.WorkloadOptions = flattenWorkloadWorkloadOptions(c, m["workloadOptions"], res) + resultRes.EkmProvisioningResponse = flattenWorkloadEkmProvisioningResponse(c, m["ekmProvisioningResponse"], res) + resultRes.ViolationNotificationsEnabled = dcl.FlattenBool(m["violationNotificationsEnabled"]) + resultRes.Organization = dcl.FlattenString(m["organization"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// expandWorkloadResourcesMap expands the contents of WorkloadResources into a JSON +// request object. +func expandWorkloadResourcesMap(c *Client, f map[string]WorkloadResources, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadResources(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadResourcesSlice expands the contents of WorkloadResources into a JSON +// request object. 
+func expandWorkloadResourcesSlice(c *Client, f []WorkloadResources, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadResources(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadResourcesMap flattens the contents of WorkloadResources from a JSON +// response object. +func flattenWorkloadResourcesMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResources { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResources{} + } + + if len(a) == 0 { + return map[string]WorkloadResources{} + } + + items := make(map[string]WorkloadResources) + for k, item := range a { + items[k] = *flattenWorkloadResources(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadResourcesSlice flattens the contents of WorkloadResources from a JSON +// response object. +func flattenWorkloadResourcesSlice(c *Client, i interface{}, res *Workload) []WorkloadResources { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResources{} + } + + if len(a) == 0 { + return []WorkloadResources{} + } + + items := make([]WorkloadResources, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResources(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadResources expands an instance of WorkloadResources into a JSON +// request object. 
+func expandWorkloadResources(c *Client, f *WorkloadResources, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceId; !dcl.IsEmptyValueIndirect(v) { + m["resourceId"] = v + } + if v := f.ResourceType; !dcl.IsEmptyValueIndirect(v) { + m["resourceType"] = v + } + + return m, nil +} + +// flattenWorkloadResources flattens an instance of WorkloadResources from a JSON +// response object. +func flattenWorkloadResources(c *Client, i interface{}, res *Workload) *WorkloadResources { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadResources{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadResources + } + r.ResourceId = dcl.FlattenInteger(m["resourceId"]) + r.ResourceType = flattenWorkloadResourcesResourceTypeEnum(m["resourceType"]) + + return r +} + +// expandWorkloadKmsSettingsMap expands the contents of WorkloadKmsSettings into a JSON +// request object. +func expandWorkloadKmsSettingsMap(c *Client, f map[string]WorkloadKmsSettings, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadKmsSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadKmsSettingsSlice expands the contents of WorkloadKmsSettings into a JSON +// request object. 
+func expandWorkloadKmsSettingsSlice(c *Client, f []WorkloadKmsSettings, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadKmsSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadKmsSettingsMap flattens the contents of WorkloadKmsSettings from a JSON +// response object. +func flattenWorkloadKmsSettingsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadKmsSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadKmsSettings{} + } + + if len(a) == 0 { + return map[string]WorkloadKmsSettings{} + } + + items := make(map[string]WorkloadKmsSettings) + for k, item := range a { + items[k] = *flattenWorkloadKmsSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadKmsSettingsSlice flattens the contents of WorkloadKmsSettings from a JSON +// response object. +func flattenWorkloadKmsSettingsSlice(c *Client, i interface{}, res *Workload) []WorkloadKmsSettings { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadKmsSettings{} + } + + if len(a) == 0 { + return []WorkloadKmsSettings{} + } + + items := make([]WorkloadKmsSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadKmsSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadKmsSettings expands an instance of WorkloadKmsSettings into a JSON +// request object. 
+func expandWorkloadKmsSettings(c *Client, f *WorkloadKmsSettings, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NextRotationTime; !dcl.IsEmptyValueIndirect(v) { + m["nextRotationTime"] = v + } + if v := f.RotationPeriod; !dcl.IsEmptyValueIndirect(v) { + m["rotationPeriod"] = v + } + + return m, nil +} + +// flattenWorkloadKmsSettings flattens an instance of WorkloadKmsSettings from a JSON +// response object. +func flattenWorkloadKmsSettings(c *Client, i interface{}, res *Workload) *WorkloadKmsSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadKmsSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadKmsSettings + } + r.NextRotationTime = dcl.FlattenString(m["nextRotationTime"]) + r.RotationPeriod = dcl.FlattenString(m["rotationPeriod"]) + + return r +} + +// expandWorkloadResourceSettingsMap expands the contents of WorkloadResourceSettings into a JSON +// request object. +func expandWorkloadResourceSettingsMap(c *Client, f map[string]WorkloadResourceSettings, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadResourceSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadResourceSettingsSlice expands the contents of WorkloadResourceSettings into a JSON +// request object. 
+func expandWorkloadResourceSettingsSlice(c *Client, f []WorkloadResourceSettings, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadResourceSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadResourceSettingsMap flattens the contents of WorkloadResourceSettings from a JSON +// response object. +func flattenWorkloadResourceSettingsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResourceSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResourceSettings{} + } + + if len(a) == 0 { + return map[string]WorkloadResourceSettings{} + } + + items := make(map[string]WorkloadResourceSettings) + for k, item := range a { + items[k] = *flattenWorkloadResourceSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadResourceSettingsSlice flattens the contents of WorkloadResourceSettings from a JSON +// response object. +func flattenWorkloadResourceSettingsSlice(c *Client, i interface{}, res *Workload) []WorkloadResourceSettings { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResourceSettings{} + } + + if len(a) == 0 { + return []WorkloadResourceSettings{} + } + + items := make([]WorkloadResourceSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResourceSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadResourceSettings expands an instance of WorkloadResourceSettings into a JSON +// request object. 
+func expandWorkloadResourceSettings(c *Client, f *WorkloadResourceSettings, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceId; !dcl.IsEmptyValueIndirect(v) { + m["resourceId"] = v + } + if v := f.ResourceType; !dcl.IsEmptyValueIndirect(v) { + m["resourceType"] = v + } + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + m["displayName"] = v + } + + return m, nil +} + +// flattenWorkloadResourceSettings flattens an instance of WorkloadResourceSettings from a JSON +// response object. +func flattenWorkloadResourceSettings(c *Client, i interface{}, res *Workload) *WorkloadResourceSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadResourceSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadResourceSettings + } + r.ResourceId = dcl.FlattenString(m["resourceId"]) + r.ResourceType = flattenWorkloadResourceSettingsResourceTypeEnum(m["resourceType"]) + r.DisplayName = dcl.FlattenString(m["displayName"]) + + return r +} + +// expandWorkloadSaaEnrollmentResponseMap expands the contents of WorkloadSaaEnrollmentResponse into a JSON +// request object. +func expandWorkloadSaaEnrollmentResponseMap(c *Client, f map[string]WorkloadSaaEnrollmentResponse, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadSaaEnrollmentResponse(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadSaaEnrollmentResponseSlice expands the contents of WorkloadSaaEnrollmentResponse into a JSON +// request object. 
+func expandWorkloadSaaEnrollmentResponseSlice(c *Client, f []WorkloadSaaEnrollmentResponse, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadSaaEnrollmentResponse(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadSaaEnrollmentResponseMap flattens the contents of WorkloadSaaEnrollmentResponse from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseMap(c *Client, i interface{}, res *Workload) map[string]WorkloadSaaEnrollmentResponse { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadSaaEnrollmentResponse{} + } + + if len(a) == 0 { + return map[string]WorkloadSaaEnrollmentResponse{} + } + + items := make(map[string]WorkloadSaaEnrollmentResponse) + for k, item := range a { + items[k] = *flattenWorkloadSaaEnrollmentResponse(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSlice flattens the contents of WorkloadSaaEnrollmentResponse from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseSlice(c *Client, i interface{}, res *Workload) []WorkloadSaaEnrollmentResponse { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadSaaEnrollmentResponse{} + } + + if len(a) == 0 { + return []WorkloadSaaEnrollmentResponse{} + } + + items := make([]WorkloadSaaEnrollmentResponse, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadSaaEnrollmentResponse(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadSaaEnrollmentResponse expands an instance of WorkloadSaaEnrollmentResponse into a JSON +// request object. 
+func expandWorkloadSaaEnrollmentResponse(c *Client, f *WorkloadSaaEnrollmentResponse, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SetupErrors; v != nil { + m["setupErrors"] = v + } + if v := f.SetupStatus; !dcl.IsEmptyValueIndirect(v) { + m["setupStatus"] = v + } + + return m, nil +} + +// flattenWorkloadSaaEnrollmentResponse flattens an instance of WorkloadSaaEnrollmentResponse from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponse(c *Client, i interface{}, res *Workload) *WorkloadSaaEnrollmentResponse { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadSaaEnrollmentResponse{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadSaaEnrollmentResponse + } + r.SetupErrors = flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumSlice(c, m["setupErrors"], res) + r.SetupStatus = flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(m["setupStatus"]) + + return r +} + +// expandWorkloadComplianceStatusMap expands the contents of WorkloadComplianceStatus into a JSON +// request object. +func expandWorkloadComplianceStatusMap(c *Client, f map[string]WorkloadComplianceStatus, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadComplianceStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadComplianceStatusSlice expands the contents of WorkloadComplianceStatus into a JSON +// request object. 
+func expandWorkloadComplianceStatusSlice(c *Client, f []WorkloadComplianceStatus, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadComplianceStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadComplianceStatusMap flattens the contents of WorkloadComplianceStatus from a JSON +// response object. +func flattenWorkloadComplianceStatusMap(c *Client, i interface{}, res *Workload) map[string]WorkloadComplianceStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadComplianceStatus{} + } + + if len(a) == 0 { + return map[string]WorkloadComplianceStatus{} + } + + items := make(map[string]WorkloadComplianceStatus) + for k, item := range a { + items[k] = *flattenWorkloadComplianceStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadComplianceStatusSlice flattens the contents of WorkloadComplianceStatus from a JSON +// response object. +func flattenWorkloadComplianceStatusSlice(c *Client, i interface{}, res *Workload) []WorkloadComplianceStatus { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadComplianceStatus{} + } + + if len(a) == 0 { + return []WorkloadComplianceStatus{} + } + + items := make([]WorkloadComplianceStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadComplianceStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadComplianceStatus expands an instance of WorkloadComplianceStatus into a JSON +// request object. 
+func expandWorkloadComplianceStatus(c *Client, f *WorkloadComplianceStatus, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ActiveViolationCount; v != nil { + m["activeViolationCount"] = v + } + if v := f.AcknowledgedViolationCount; v != nil { + m["acknowledgedViolationCount"] = v + } + + return m, nil +} + +// flattenWorkloadComplianceStatus flattens an instance of WorkloadComplianceStatus from a JSON +// response object. +func flattenWorkloadComplianceStatus(c *Client, i interface{}, res *Workload) *WorkloadComplianceStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadComplianceStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadComplianceStatus + } + r.ActiveViolationCount = dcl.FlattenIntSlice(m["activeViolationCount"]) + r.AcknowledgedViolationCount = dcl.FlattenIntSlice(m["acknowledgedViolationCount"]) + + return r +} + +// expandWorkloadPartnerPermissionsMap expands the contents of WorkloadPartnerPermissions into a JSON +// request object. +func expandWorkloadPartnerPermissionsMap(c *Client, f map[string]WorkloadPartnerPermissions, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadPartnerPermissions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadPartnerPermissionsSlice expands the contents of WorkloadPartnerPermissions into a JSON +// request object. 
+func expandWorkloadPartnerPermissionsSlice(c *Client, f []WorkloadPartnerPermissions, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadPartnerPermissions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadPartnerPermissionsMap flattens the contents of WorkloadPartnerPermissions from a JSON +// response object. +func flattenWorkloadPartnerPermissionsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadPartnerPermissions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadPartnerPermissions{} + } + + if len(a) == 0 { + return map[string]WorkloadPartnerPermissions{} + } + + items := make(map[string]WorkloadPartnerPermissions) + for k, item := range a { + items[k] = *flattenWorkloadPartnerPermissions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadPartnerPermissionsSlice flattens the contents of WorkloadPartnerPermissions from a JSON +// response object. +func flattenWorkloadPartnerPermissionsSlice(c *Client, i interface{}, res *Workload) []WorkloadPartnerPermissions { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadPartnerPermissions{} + } + + if len(a) == 0 { + return []WorkloadPartnerPermissions{} + } + + items := make([]WorkloadPartnerPermissions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadPartnerPermissions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadPartnerPermissions expands an instance of WorkloadPartnerPermissions into a JSON +// request object. 
+func expandWorkloadPartnerPermissions(c *Client, f *WorkloadPartnerPermissions, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DataLogsViewer; !dcl.IsEmptyValueIndirect(v) { + m["dataLogsViewer"] = v + } + if v := f.ServiceAccessApprover; !dcl.IsEmptyValueIndirect(v) { + m["serviceAccessApprover"] = v + } + if v := f.AssuredWorkloadsMonitoring; !dcl.IsEmptyValueIndirect(v) { + m["assuredWorkloadsMonitoring"] = v + } + + return m, nil +} + +// flattenWorkloadPartnerPermissions flattens an instance of WorkloadPartnerPermissions from a JSON +// response object. +func flattenWorkloadPartnerPermissions(c *Client, i interface{}, res *Workload) *WorkloadPartnerPermissions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadPartnerPermissions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadPartnerPermissions + } + r.DataLogsViewer = dcl.FlattenBool(m["dataLogsViewer"]) + r.ServiceAccessApprover = dcl.FlattenBool(m["serviceAccessApprover"]) + r.AssuredWorkloadsMonitoring = dcl.FlattenBool(m["assuredWorkloadsMonitoring"]) + + return r +} + +// expandWorkloadWorkloadOptionsMap expands the contents of WorkloadWorkloadOptions into a JSON +// request object. +func expandWorkloadWorkloadOptionsMap(c *Client, f map[string]WorkloadWorkloadOptions, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadWorkloadOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadWorkloadOptionsSlice expands the contents of WorkloadWorkloadOptions into a JSON +// request object. 
+func expandWorkloadWorkloadOptionsSlice(c *Client, f []WorkloadWorkloadOptions, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadWorkloadOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadWorkloadOptionsMap flattens the contents of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptionsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadWorkloadOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadWorkloadOptions{} + } + + if len(a) == 0 { + return map[string]WorkloadWorkloadOptions{} + } + + items := make(map[string]WorkloadWorkloadOptions) + for k, item := range a { + items[k] = *flattenWorkloadWorkloadOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadWorkloadOptionsSlice flattens the contents of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptionsSlice(c *Client, i interface{}, res *Workload) []WorkloadWorkloadOptions { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadWorkloadOptions{} + } + + if len(a) == 0 { + return []WorkloadWorkloadOptions{} + } + + items := make([]WorkloadWorkloadOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadWorkloadOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadWorkloadOptions expands an instance of WorkloadWorkloadOptions into a JSON +// request object. 
+func expandWorkloadWorkloadOptions(c *Client, f *WorkloadWorkloadOptions, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KajEnrollmentType; !dcl.IsEmptyValueIndirect(v) { + m["kajEnrollmentType"] = v + } + + return m, nil +} + +// flattenWorkloadWorkloadOptions flattens an instance of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptions(c *Client, i interface{}, res *Workload) *WorkloadWorkloadOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadWorkloadOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadWorkloadOptions + } + r.KajEnrollmentType = flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(m["kajEnrollmentType"]) + + return r +} + +// expandWorkloadEkmProvisioningResponseMap expands the contents of WorkloadEkmProvisioningResponse into a JSON +// request object. +func expandWorkloadEkmProvisioningResponseMap(c *Client, f map[string]WorkloadEkmProvisioningResponse, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadEkmProvisioningResponse(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadEkmProvisioningResponseSlice expands the contents of WorkloadEkmProvisioningResponse into a JSON +// request object. 
+func expandWorkloadEkmProvisioningResponseSlice(c *Client, f []WorkloadEkmProvisioningResponse, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadEkmProvisioningResponse(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadEkmProvisioningResponseMap flattens the contents of WorkloadEkmProvisioningResponse from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponse { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponse{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponse{} + } + + items := make(map[string]WorkloadEkmProvisioningResponse) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponse(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseSlice flattens the contents of WorkloadEkmProvisioningResponse from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponse { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponse{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponse{} + } + + items := make([]WorkloadEkmProvisioningResponse, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponse(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadEkmProvisioningResponse expands an instance of WorkloadEkmProvisioningResponse into a JSON +// request object. 
+func expandWorkloadEkmProvisioningResponse(c *Client, f *WorkloadEkmProvisioningResponse, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EkmProvisioningState; !dcl.IsEmptyValueIndirect(v) { + m["ekmProvisioningState"] = v + } + if v := f.EkmProvisioningErrorDomain; !dcl.IsEmptyValueIndirect(v) { + m["ekmProvisioningErrorDomain"] = v + } + if v := f.EkmProvisioningErrorMapping; !dcl.IsEmptyValueIndirect(v) { + m["ekmProvisioningErrorMapping"] = v + } + + return m, nil +} + +// flattenWorkloadEkmProvisioningResponse flattens an instance of WorkloadEkmProvisioningResponse from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponse(c *Client, i interface{}, res *Workload) *WorkloadEkmProvisioningResponse { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadEkmProvisioningResponse{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadEkmProvisioningResponse + } + r.EkmProvisioningState = flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(m["ekmProvisioningState"]) + r.EkmProvisioningErrorDomain = flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(m["ekmProvisioningErrorDomain"]) + r.EkmProvisioningErrorMapping = flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(m["ekmProvisioningErrorMapping"]) + + return r +} + +// flattenWorkloadResourcesResourceTypeEnumMap flattens the contents of WorkloadResourcesResourceTypeEnum from a JSON +// response object. 
+func flattenWorkloadResourcesResourceTypeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResourcesResourceTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResourcesResourceTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadResourcesResourceTypeEnum{} + } + + items := make(map[string]WorkloadResourcesResourceTypeEnum) + for k, item := range a { + items[k] = *flattenWorkloadResourcesResourceTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadResourcesResourceTypeEnumSlice flattens the contents of WorkloadResourcesResourceTypeEnum from a JSON +// response object. +func flattenWorkloadResourcesResourceTypeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadResourcesResourceTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResourcesResourceTypeEnum{} + } + + if len(a) == 0 { + return []WorkloadResourcesResourceTypeEnum{} + } + + items := make([]WorkloadResourcesResourceTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResourcesResourceTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadResourcesResourceTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadResourcesResourceTypeEnum with the same value as that string. +func flattenWorkloadResourcesResourceTypeEnum(i interface{}) *WorkloadResourcesResourceTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadResourcesResourceTypeEnumRef(s) +} + +// flattenWorkloadComplianceRegimeEnumMap flattens the contents of WorkloadComplianceRegimeEnum from a JSON +// response object. 
+func flattenWorkloadComplianceRegimeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadComplianceRegimeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadComplianceRegimeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadComplianceRegimeEnum{} + } + + items := make(map[string]WorkloadComplianceRegimeEnum) + for k, item := range a { + items[k] = *flattenWorkloadComplianceRegimeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadComplianceRegimeEnumSlice flattens the contents of WorkloadComplianceRegimeEnum from a JSON +// response object. +func flattenWorkloadComplianceRegimeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadComplianceRegimeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadComplianceRegimeEnum{} + } + + if len(a) == 0 { + return []WorkloadComplianceRegimeEnum{} + } + + items := make([]WorkloadComplianceRegimeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadComplianceRegimeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadComplianceRegimeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadComplianceRegimeEnum with the same value as that string. +func flattenWorkloadComplianceRegimeEnum(i interface{}) *WorkloadComplianceRegimeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadComplianceRegimeEnumRef(s) +} + +// flattenWorkloadResourceSettingsResourceTypeEnumMap flattens the contents of WorkloadResourceSettingsResourceTypeEnum from a JSON +// response object. 
+func flattenWorkloadResourceSettingsResourceTypeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResourceSettingsResourceTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResourceSettingsResourceTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadResourceSettingsResourceTypeEnum{} + } + + items := make(map[string]WorkloadResourceSettingsResourceTypeEnum) + for k, item := range a { + items[k] = *flattenWorkloadResourceSettingsResourceTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadResourceSettingsResourceTypeEnumSlice flattens the contents of WorkloadResourceSettingsResourceTypeEnum from a JSON +// response object. +func flattenWorkloadResourceSettingsResourceTypeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadResourceSettingsResourceTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResourceSettingsResourceTypeEnum{} + } + + if len(a) == 0 { + return []WorkloadResourceSettingsResourceTypeEnum{} + } + + items := make([]WorkloadResourceSettingsResourceTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResourceSettingsResourceTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadResourceSettingsResourceTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadResourceSettingsResourceTypeEnum with the same value as that string. +func flattenWorkloadResourceSettingsResourceTypeEnum(i interface{}) *WorkloadResourceSettingsResourceTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadResourceSettingsResourceTypeEnumRef(s) +} + +// flattenWorkloadKajEnrollmentStateEnumMap flattens the contents of WorkloadKajEnrollmentStateEnum from a JSON +// response object. 
+func flattenWorkloadKajEnrollmentStateEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadKajEnrollmentStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadKajEnrollmentStateEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadKajEnrollmentStateEnum{} + } + + items := make(map[string]WorkloadKajEnrollmentStateEnum) + for k, item := range a { + items[k] = *flattenWorkloadKajEnrollmentStateEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadKajEnrollmentStateEnumSlice flattens the contents of WorkloadKajEnrollmentStateEnum from a JSON +// response object. +func flattenWorkloadKajEnrollmentStateEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadKajEnrollmentStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadKajEnrollmentStateEnum{} + } + + if len(a) == 0 { + return []WorkloadKajEnrollmentStateEnum{} + } + + items := make([]WorkloadKajEnrollmentStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadKajEnrollmentStateEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadKajEnrollmentStateEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadKajEnrollmentStateEnum with the same value as that string. +func flattenWorkloadKajEnrollmentStateEnum(i interface{}) *WorkloadKajEnrollmentStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadKajEnrollmentStateEnumRef(s) +} + +// flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumMap flattens the contents of WorkloadSaaEnrollmentResponseSetupErrorsEnum from a JSON +// response object. 
+func flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + items := make(map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum) + for k, item := range a { + items[k] = *flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumSlice flattens the contents of WorkloadSaaEnrollmentResponseSetupErrorsEnum from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadSaaEnrollmentResponseSetupErrorsEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + if len(a) == 0 { + return []WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + items := make([]WorkloadSaaEnrollmentResponseSetupErrorsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadSaaEnrollmentResponseSetupErrorsEnum with the same value as that string. +func flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum(i interface{}) *WorkloadSaaEnrollmentResponseSetupErrorsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadSaaEnrollmentResponseSetupErrorsEnumRef(s) +} + +// flattenWorkloadSaaEnrollmentResponseSetupStatusEnumMap flattens the contents of WorkloadSaaEnrollmentResponseSetupStatusEnum from a JSON +// response object. 
+func flattenWorkloadSaaEnrollmentResponseSetupStatusEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + items := make(map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum) + for k, item := range a { + items[k] = *flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupStatusEnumSlice flattens the contents of WorkloadSaaEnrollmentResponseSetupStatusEnum from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseSetupStatusEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadSaaEnrollmentResponseSetupStatusEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + if len(a) == 0 { + return []WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + items := make([]WorkloadSaaEnrollmentResponseSetupStatusEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupStatusEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadSaaEnrollmentResponseSetupStatusEnum with the same value as that string. +func flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(i interface{}) *WorkloadSaaEnrollmentResponseSetupStatusEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadSaaEnrollmentResponseSetupStatusEnumRef(s) +} + +// flattenWorkloadPartnerEnumMap flattens the contents of WorkloadPartnerEnum from a JSON +// response object. 
+func flattenWorkloadPartnerEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadPartnerEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadPartnerEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadPartnerEnum{} + } + + items := make(map[string]WorkloadPartnerEnum) + for k, item := range a { + items[k] = *flattenWorkloadPartnerEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadPartnerEnumSlice flattens the contents of WorkloadPartnerEnum from a JSON +// response object. +func flattenWorkloadPartnerEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadPartnerEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadPartnerEnum{} + } + + if len(a) == 0 { + return []WorkloadPartnerEnum{} + } + + items := make([]WorkloadPartnerEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadPartnerEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadPartnerEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadPartnerEnum with the same value as that string. +func flattenWorkloadPartnerEnum(i interface{}) *WorkloadPartnerEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadPartnerEnumRef(s) +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumMap flattens the contents of WorkloadWorkloadOptionsKajEnrollmentTypeEnum from a JSON +// response object. 
+func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + items := make(map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum) + for k, item := range a { + items[k] = *flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumSlice flattens the contents of WorkloadWorkloadOptionsKajEnrollmentTypeEnum from a JSON +// response object. +func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + if len(a) == 0 { + return []WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + items := make([]WorkloadWorkloadOptionsKajEnrollmentTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadWorkloadOptionsKajEnrollmentTypeEnum with the same value as that string. +func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(i interface{}) *WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(s) +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumMap flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningStateEnum from a JSON +// response object. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + items := make(map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumSlice flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningStateEnum from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + items := make([]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum with the same value as that string. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(i interface{}) *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadEkmProvisioningResponseEkmProvisioningStateEnumRef(s) +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumMap flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + items := make(map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumSlice flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum from a JSON +// response object. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + items := make([]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum with the same value as that string. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(i interface{}) *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumRef(s) +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumMap flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum from a JSON +// response object. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + items := make(map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumSlice flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + items := make([]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum with the same value as that string. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(i interface{}) *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Workload) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalWorkload(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Organization == nil && ncr.Organization == nil { + c.Config.Logger.Info("Both Organization fields null - considering equal.") + } else if nr.Organization == nil || ncr.Organization == nil { + c.Config.Logger.Info("Only one Organization field is null - considering unequal.") + return false + } else if *nr.Organization != *ncr.Organization { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type workloadDiff struct { + // The diff 
should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp workloadApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToWorkloadDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]workloadDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []workloadDiff + // For each operation name, create a workloadDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := workloadDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToWorkloadApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToWorkloadApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (workloadApiOperation, error) { + switch opName { + + case "updateWorkloadUpdateWorkloadOperation": + return &updateWorkloadUpdateWorkloadOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractWorkloadFields(r *Workload) error { + vKmsSettings := r.KmsSettings + if vKmsSettings == nil { + // note: explicitly not the empty object. 
+ vKmsSettings = &WorkloadKmsSettings{} + } + if err := extractWorkloadKmsSettingsFields(r, vKmsSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKmsSettings) { + r.KmsSettings = vKmsSettings + } + vSaaEnrollmentResponse := r.SaaEnrollmentResponse + if vSaaEnrollmentResponse == nil { + // note: explicitly not the empty object. + vSaaEnrollmentResponse = &WorkloadSaaEnrollmentResponse{} + } + if err := extractWorkloadSaaEnrollmentResponseFields(r, vSaaEnrollmentResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSaaEnrollmentResponse) { + r.SaaEnrollmentResponse = vSaaEnrollmentResponse + } + vComplianceStatus := r.ComplianceStatus + if vComplianceStatus == nil { + // note: explicitly not the empty object. + vComplianceStatus = &WorkloadComplianceStatus{} + } + if err := extractWorkloadComplianceStatusFields(r, vComplianceStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComplianceStatus) { + r.ComplianceStatus = vComplianceStatus + } + vPartnerPermissions := r.PartnerPermissions + if vPartnerPermissions == nil { + // note: explicitly not the empty object. + vPartnerPermissions = &WorkloadPartnerPermissions{} + } + if err := extractWorkloadPartnerPermissionsFields(r, vPartnerPermissions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPartnerPermissions) { + r.PartnerPermissions = vPartnerPermissions + } + vWorkloadOptions := r.WorkloadOptions + if vWorkloadOptions == nil { + // note: explicitly not the empty object. + vWorkloadOptions = &WorkloadWorkloadOptions{} + } + if err := extractWorkloadWorkloadOptionsFields(r, vWorkloadOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadOptions) { + r.WorkloadOptions = vWorkloadOptions + } + vEkmProvisioningResponse := r.EkmProvisioningResponse + if vEkmProvisioningResponse == nil { + // note: explicitly not the empty object. 
+ vEkmProvisioningResponse = &WorkloadEkmProvisioningResponse{} + } + if err := extractWorkloadEkmProvisioningResponseFields(r, vEkmProvisioningResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEkmProvisioningResponse) { + r.EkmProvisioningResponse = vEkmProvisioningResponse + } + return nil +} +func extractWorkloadResourcesFields(r *Workload, o *WorkloadResources) error { + return nil +} +func extractWorkloadKmsSettingsFields(r *Workload, o *WorkloadKmsSettings) error { + return nil +} +func extractWorkloadResourceSettingsFields(r *Workload, o *WorkloadResourceSettings) error { + return nil +} +func extractWorkloadSaaEnrollmentResponseFields(r *Workload, o *WorkloadSaaEnrollmentResponse) error { + return nil +} +func extractWorkloadComplianceStatusFields(r *Workload, o *WorkloadComplianceStatus) error { + return nil +} +func extractWorkloadPartnerPermissionsFields(r *Workload, o *WorkloadPartnerPermissions) error { + return nil +} +func extractWorkloadWorkloadOptionsFields(r *Workload, o *WorkloadWorkloadOptions) error { + return nil +} +func extractWorkloadEkmProvisioningResponseFields(r *Workload, o *WorkloadEkmProvisioningResponse) error { + return nil +} + +func postReadExtractWorkloadFields(r *Workload) error { + vKmsSettings := r.KmsSettings + if vKmsSettings == nil { + // note: explicitly not the empty object. + vKmsSettings = &WorkloadKmsSettings{} + } + if err := postReadExtractWorkloadKmsSettingsFields(r, vKmsSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKmsSettings) { + r.KmsSettings = vKmsSettings + } + vSaaEnrollmentResponse := r.SaaEnrollmentResponse + if vSaaEnrollmentResponse == nil { + // note: explicitly not the empty object. 
+ vSaaEnrollmentResponse = &WorkloadSaaEnrollmentResponse{} + } + if err := postReadExtractWorkloadSaaEnrollmentResponseFields(r, vSaaEnrollmentResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSaaEnrollmentResponse) { + r.SaaEnrollmentResponse = vSaaEnrollmentResponse + } + vComplianceStatus := r.ComplianceStatus + if vComplianceStatus == nil { + // note: explicitly not the empty object. + vComplianceStatus = &WorkloadComplianceStatus{} + } + if err := postReadExtractWorkloadComplianceStatusFields(r, vComplianceStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComplianceStatus) { + r.ComplianceStatus = vComplianceStatus + } + vPartnerPermissions := r.PartnerPermissions + if vPartnerPermissions == nil { + // note: explicitly not the empty object. + vPartnerPermissions = &WorkloadPartnerPermissions{} + } + if err := postReadExtractWorkloadPartnerPermissionsFields(r, vPartnerPermissions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPartnerPermissions) { + r.PartnerPermissions = vPartnerPermissions + } + vWorkloadOptions := r.WorkloadOptions + if vWorkloadOptions == nil { + // note: explicitly not the empty object. + vWorkloadOptions = &WorkloadWorkloadOptions{} + } + if err := postReadExtractWorkloadWorkloadOptionsFields(r, vWorkloadOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadOptions) { + r.WorkloadOptions = vWorkloadOptions + } + vEkmProvisioningResponse := r.EkmProvisioningResponse + if vEkmProvisioningResponse == nil { + // note: explicitly not the empty object. 
+ vEkmProvisioningResponse = &WorkloadEkmProvisioningResponse{} + } + if err := postReadExtractWorkloadEkmProvisioningResponseFields(r, vEkmProvisioningResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEkmProvisioningResponse) { + r.EkmProvisioningResponse = vEkmProvisioningResponse + } + return nil +} +func postReadExtractWorkloadResourcesFields(r *Workload, o *WorkloadResources) error { + return nil +} +func postReadExtractWorkloadKmsSettingsFields(r *Workload, o *WorkloadKmsSettings) error { + return nil +} +func postReadExtractWorkloadResourceSettingsFields(r *Workload, o *WorkloadResourceSettings) error { + return nil +} +func postReadExtractWorkloadSaaEnrollmentResponseFields(r *Workload, o *WorkloadSaaEnrollmentResponse) error { + return nil +} +func postReadExtractWorkloadComplianceStatusFields(r *Workload, o *WorkloadComplianceStatus) error { + return nil +} +func postReadExtractWorkloadPartnerPermissionsFields(r *Workload, o *WorkloadPartnerPermissions) error { + return nil +} +func postReadExtractWorkloadWorkloadOptionsFields(r *Workload, o *WorkloadWorkloadOptions) error { + return nil +} +func postReadExtractWorkloadEkmProvisioningResponseFields(r *Workload, o *WorkloadEkmProvisioningResponse) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/client.go b/mmv1/third_party/terraform/services/cloudbuild/client.go new file mode 100644 index 000000000000..daccbd324abc --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/client.go @@ -0,0 +1,18 @@ +package cloudbuild + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go b/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go new file mode 100644 index 000000000000..879c3819e6b2 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go @@ -0,0 +1,103 @@ +package cloudbuild + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// betaToGaPrivatePool is populating GA specific PrivatePoolV1Config values and setting WorkerConfig and NetworkConfig to nil. +// r.PrivatePoolV1Config and c points to the same object. +func betaToGaPrivatePool(r *WorkerPool, c *WorkerPoolPrivatePoolV1Config) *WorkerPoolPrivatePoolV1Config { + cfgWorkerConfig := &WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + cfgNetworkConfig := &WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + cfgPrivateServiceConnect := &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + if r.WorkerConfig != nil { + cfgWorkerConfig.DiskSizeGb = r.WorkerConfig.DiskSizeGb + cfgWorkerConfig.MachineType = r.WorkerConfig.MachineType + cfgWorkerConfig.EnableNestedVirtualization = r.WorkerConfig.EnableNestedVirtualization + cfgNetworkConfig.EgressOption = noExternalIPEnum(r.WorkerConfig.NoExternalIP) + } + if r.NetworkConfig != nil { + cfgNetworkConfig.PeeredNetwork = r.NetworkConfig.PeeredNetwork + cfgNetworkConfig.PeeredNetworkIPRange = r.NetworkConfig.PeeredNetworkIPRange + } + if r.PrivateServiceConnect != nil { + cfgPrivateServiceConnect.NetworkAttachment = r.PrivateServiceConnect.NetworkAttachment + cfgPrivateServiceConnect.RouteAllTraffic = r.PrivateServiceConnect.RouteAllTraffic + if r.WorkerConfig != nil { + cfgPrivateServiceConnect.PublicIPAddressDisabled = r.WorkerConfig.NoExternalIP + } + } + + cfg := &WorkerPoolPrivatePoolV1Config{} + cfg.WorkerConfig = cfgWorkerConfig + cfg.NetworkConfig = cfgNetworkConfig + if cfg.PrivateServiceConnect != nil { 
+ cfg.NetworkConfig = nil + cfg.PrivateServiceConnect = cfgPrivateServiceConnect + } + + r.WorkerConfig = nil + r.NetworkConfig = nil + r.PrivateServiceConnect = nil + return cfg +} + +// gaToBetaPrivatePool is populating beta specific values (WorkerConfig and NetworkConfig) and setting PrivatePoolV1Config to nil. +// r.PrivatePoolV1Config and c points to the same object. +func gaToBetaPrivatePool(r *WorkerPool, c *WorkerPoolPrivatePoolV1Config) *WorkerPoolPrivatePoolV1Config { + if c == nil { + return nil + } + + if c.WorkerConfig != nil && r.WorkerConfig == nil { + r.WorkerConfig = &WorkerPoolWorkerConfig{ + DiskSizeGb: c.WorkerConfig.DiskSizeGb, + MachineType: c.WorkerConfig.MachineType, + EnableNestedVirtualization: c.WorkerConfig.EnableNestedVirtualization, + } + if c.NetworkConfig != nil { + r.WorkerConfig.NoExternalIP = noExternalIPBoolean(c.NetworkConfig) + } + if c.PrivateServiceConnect != nil { + r.WorkerConfig.NoExternalIP = c.PrivateServiceConnect.PublicIPAddressDisabled + } + } + if c.NetworkConfig != nil && c.NetworkConfig.PeeredNetwork != nil && r.NetworkConfig == nil { + r.NetworkConfig = &WorkerPoolNetworkConfig{ + PeeredNetwork: c.NetworkConfig.PeeredNetwork, + PeeredNetworkIPRange: c.NetworkConfig.PeeredNetworkIPRange, + } + } + if c.PrivateServiceConnect != nil && r.PrivateServiceConnect != nil { + r.PrivateServiceConnect = &WorkerPoolPrivateServiceConnect{ + NetworkAttachment: c.PrivateServiceConnect.NetworkAttachment, + RouteAllTraffic: c.PrivateServiceConnect.RouteAllTraffic, + } + } + + r.PrivatePoolV1Config = nil + return nil +} + +func noExternalIPBoolean(networkConfig *WorkerPoolPrivatePoolV1ConfigNetworkConfig) *bool { + if networkConfig == nil || networkConfig.EgressOption == nil { + return nil + } + if string(*networkConfig.EgressOption) == "NO_PUBLIC_EGRESS" { + return dcl.Bool(true) + } + if string(*networkConfig.EgressOption) == "PUBLIC_EGRESS" { + return dcl.Bool(false) + } + return nil +} + +func noExternalIPEnum(noExternalIP 
*bool) *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + if noExternalIP == nil { + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef("EGRESS_OPTION_UNSPECIFIED") + } + if *noExternalIP { + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef("NO_PUBLIC_EGRESS") + } + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef("PUBLIC_EGRESS") +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/cloudbuild/provider_dcl_client_creation.go new file mode 100644 index 000000000000..ac4e19e756a8 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package cloudbuild + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLCloudbuildClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.CloudBuildBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool.go b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool.go new file mode 100644 index 000000000000..94789efcfb70 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool.go @@ -0,0 +1,573 @@ +package cloudbuild + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceCloudbuildWorkerPool() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudbuildWorkerPoolCreate, + Read: resourceCloudbuildWorkerPoolRead, + Update: resourceCloudbuildWorkerPoolUpdate, + Delete: resourceCloudbuildWorkerPoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudbuildWorkerPoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "User-defined name of the `WorkerPool`.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "A user-specified, human-readable name for the 
`WorkerPool`. If provided, this value must be 1-63 characters.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "network_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Network configuration for the `WorkerPool`.", + MaxItems: 1, + Elem: CloudbuildWorkerPoolNetworkConfigSchema(), + ConflictsWith: []string{"private_service_connect"}, + }, + + "private_service_connect": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Private Service Connect configuration for the pool.", + MaxItems: 1, + Elem: CloudbuildWorkerPoolPrivateServiceConnectSchema(), + ConflictsWith: []string{"network_config"}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration to be used for a creating workers in the `WorkerPool`.", + MaxItems: 1, + Elem: CloudbuildWorkerPoolWorkerConfigSchema(), + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Time at which the request to create the `WorkerPool` was received.", + }, + + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time at which the request to delete the `WorkerPool` was received.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. `WorkerPool` state. Possible values: STATE_UNSPECIFIED, PENDING, APPROVED, REJECTED, CANCELLED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A unique identifier for the `WorkerPool`.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time at which the request to update the `WorkerPool` was received.", + }, + }, + } +} + +func CloudbuildWorkerPoolNetworkConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "peered_network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: "Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)", + }, + + "peered_network_ip_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. 
`192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used.", + }, + }, + } +} + +func CloudbuildWorkerPoolPrivateServiceConnectSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_attachment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. Immutable. The network attachment that the worker network interface is connected to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments)", + }, + + "route_all_traffic": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface.", + }, + }, + } +} + +func CloudbuildWorkerPoolWorkerConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Description: "Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. 
If `0` is specified, Cloud Build will use a standard disk size.", + }, + + "enable_nested_virtualization": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable nested virtualization on the worker, if supported by the machine type. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will set this to false.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: "Machine type of a worker, such as `n1-standard-1`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`.", + }, + + "no_external_ip": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "If true, workers are created without any public address, which prevents network egress to public IPs.", + }, + }, + } +} + +func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } 
+ billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkerPool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkerPool %q: %#v", d.Id(), res) + + return resourceCloudbuildWorkerPoolRead(d, meta) +} + +func resourceCloudbuildWorkerPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkerPool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("CloudbuildWorkerPool %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("network_config", flattenCloudbuildWorkerPoolNetworkConfig(res.NetworkConfig)); err != nil { + return fmt.Errorf("error setting network_config in state: %s", err) + } + if err = d.Set("private_service_connect", flattenCloudbuildWorkerPoolPrivateServiceConnect(res.PrivateServiceConnect)); err != nil { + return fmt.Errorf("error setting private_service_connect in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("worker_config", flattenCloudbuildWorkerPoolWorkerConfig(res.WorkerConfig)); err != nil { + return fmt.Errorf("error setting worker_config in state: %s", err) + } + if err = d.Set("annotations", flattenCloudbuildWorkerPoolAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting 
annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("delete_time", res.DeleteTime); err != nil { + return fmt.Errorf("error setting delete_time in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceCloudbuildWorkerPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + 
d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkerPool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkerPool %q: %#v", d.Id(), res) + + return resourceCloudbuildWorkerPoolRead(d, meta) +} + +func resourceCloudbuildWorkerPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + log.Printf("[DEBUG] Deleting WorkerPool %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: 
%w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkerPool(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished deleting WorkerPool %q", d.Id()) + return nil +} + +func resourceCloudbuildWorkerPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/workerPools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandCloudbuildWorkerPoolNetworkConfig(o interface{}) *WorkerPoolNetworkConfig { + if o == nil { + return EmptyWorkerPoolNetworkConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkerPoolNetworkConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkerPoolNetworkConfig{ + PeeredNetwork: dcl.String(obj["peered_network"].(string)), + PeeredNetworkIPRange: dcl.String(obj["peered_network_ip_range"].(string)), + } +} + +func flattenCloudbuildWorkerPoolNetworkConfig(obj *WorkerPoolNetworkConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "peered_network": obj.PeeredNetwork, + "peered_network_ip_range": obj.PeeredNetworkIPRange, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildWorkerPoolPrivateServiceConnect(o interface{}) *WorkerPoolPrivateServiceConnect { + if o == nil { + return EmptyWorkerPoolPrivateServiceConnect + } + objArr := 
o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkerPoolPrivateServiceConnect + } + obj := objArr[0].(map[string]interface{}) + return &WorkerPoolPrivateServiceConnect{ + NetworkAttachment: dcl.String(obj["network_attachment"].(string)), + RouteAllTraffic: dcl.Bool(obj["route_all_traffic"].(bool)), + } +} + +func flattenCloudbuildWorkerPoolPrivateServiceConnect(obj *WorkerPoolPrivateServiceConnect) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "network_attachment": obj.NetworkAttachment, + "route_all_traffic": obj.RouteAllTraffic, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildWorkerPoolWorkerConfig(o interface{}) *WorkerPoolWorkerConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkerPoolWorkerConfig{ + DiskSizeGb: dcl.Int64(int64(obj["disk_size_gb"].(int))), + EnableNestedVirtualization: dcl.Bool(obj["enable_nested_virtualization"].(bool)), + MachineType: dcl.String(obj["machine_type"].(string)), + NoExternalIP: dcl.Bool(obj["no_external_ip"].(bool)), + } +} + +func flattenCloudbuildWorkerPoolWorkerConfig(obj *WorkerPoolWorkerConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disk_size_gb": obj.DiskSizeGb, + "enable_nested_virtualization": obj.EnableNestedVirtualization, + "machine_type": obj.MachineType, + "no_external_ip": obj.NoExternalIP, + } + + return []interface{}{transformed} + +} + +func flattenCloudbuildWorkerPoolAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git 
a/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml index 9ccad61d4411..dca7d6eaad31 100644 --- a/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml +++ b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_cloudbuild_worker_pool' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'cloudbuild.googleapis.com' api_version: 'v1' api_resource_type_kind: 'WorkerPool' diff --git a/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go new file mode 100644 index 000000000000..8576c3b4b5d6 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go @@ -0,0 +1,53 @@ +package cloudbuild + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("CloudbuildWorkerPool", testSweepCloudbuildWorkerPool) +} + +func testSweepCloudbuildWorkerPool(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for CloudbuildWorkerPool") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLCloudbuildClient(config, config.UserAgent, "", 0) + err = client.DeleteAllWorkerPool(context.Background(), d["project"], d["location"], isDeletableCloudbuildWorkerPool) + if err != nil { + return err + } + return nil +} + +func isDeletableCloudbuildWorkerPool(r *WorkerPool) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl new file mode 100644 index 000000000000..df7db9698429 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl @@ -0,0 +1,797 @@ +package cloudbuild + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type WorkerPool struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + Annotations map[string]string `json:"annotations"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + DeleteTime *string `json:"deleteTime"` + State *WorkerPoolStateEnum `json:"state"` + PrivatePoolV1Config *WorkerPoolPrivatePoolV1Config `json:"privatePoolV1Config"` + Etag *string `json:"etag"` + WorkerConfig *WorkerPoolWorkerConfig `json:"workerConfig"` + NetworkConfig *WorkerPoolNetworkConfig `json:"networkConfig"` + PrivateServiceConnect *WorkerPoolPrivateServiceConnect `json:"privateServiceConnect"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *WorkerPool) String() string { + return dcl.SprintResource(r) +} + +// The enum WorkerPoolStateEnum. 
+type WorkerPoolStateEnum string + +// WorkerPoolStateEnumRef returns a *WorkerPoolStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkerPoolStateEnumRef(s string) *WorkerPoolStateEnum { + v := WorkerPoolStateEnum(s) + return &v +} + +func (v WorkerPoolStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PENDING", "APPROVED", "REJECTED", "CANCELLED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkerPoolStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum. +type WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum string + +// WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef returns a *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef(s string) *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + v := WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(s) + return &v +} + +func (v WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"EGRESS_OPTION_UNSPECIFIED", "NO_PUBLIC_EGRESS", "PUBLIC_EGRESS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum", + Value: string(v), + Valid: []string{}, + } +} + +type WorkerPoolPrivatePoolV1Config struct { + empty bool `json:"-"` + WorkerConfig *WorkerPoolPrivatePoolV1ConfigWorkerConfig `json:"workerConfig"` + NetworkConfig *WorkerPoolPrivatePoolV1ConfigNetworkConfig `json:"networkConfig"` + PrivateServiceConnect *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect `json:"privateServiceConnect"` +} + +type jsonWorkerPoolPrivatePoolV1Config WorkerPoolPrivatePoolV1Config + +func (r *WorkerPoolPrivatePoolV1Config) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1Config + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1Config + } else { + + r.WorkerConfig = res.WorkerConfig + + r.NetworkConfig = res.NetworkConfig + + r.PrivateServiceConnect = res.PrivateServiceConnect + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1Config is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1Config *WorkerPoolPrivatePoolV1Config = &WorkerPoolPrivatePoolV1Config{empty: true} + +func (r *WorkerPoolPrivatePoolV1Config) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1Config) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1Config) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivatePoolV1ConfigWorkerConfig struct { + empty bool `json:"-"` + MachineType *string `json:"machineType"` + DiskSizeGb *int64 `json:"diskSizeGb"` + EnableNestedVirtualization *bool `json:"enableNestedVirtualization"` +} + +type jsonWorkerPoolPrivatePoolV1ConfigWorkerConfig WorkerPoolPrivatePoolV1ConfigWorkerConfig + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1ConfigWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig + } else { + + r.MachineType = res.MachineType + + r.DiskSizeGb = res.DiskSizeGb + + r.EnableNestedVirtualization = res.EnableNestedVirtualization + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1ConfigWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig *WorkerPoolPrivatePoolV1ConfigWorkerConfig = &WorkerPoolPrivatePoolV1ConfigWorkerConfig{empty: true} + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivatePoolV1ConfigNetworkConfig struct { + empty bool `json:"-"` + PeeredNetwork *string `json:"peeredNetwork"` + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange"` + EgressOption *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum `json:"egressOption"` +} + +type jsonWorkerPoolPrivatePoolV1ConfigNetworkConfig WorkerPoolPrivatePoolV1ConfigNetworkConfig + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1ConfigNetworkConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig + } else { + + r.PeeredNetwork = res.PeeredNetwork + + r.PeeredNetworkIPRange = res.PeeredNetworkIPRange + + r.EgressOption = res.EgressOption + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1ConfigNetworkConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig *WorkerPoolPrivatePoolV1ConfigNetworkConfig = &WorkerPoolPrivatePoolV1ConfigNetworkConfig{empty: true} + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect struct { + empty bool `json:"-"` + NetworkAttachment *string `json:"networkAttachment"` + PublicIPAddressDisabled *bool `json:"publicIPAddressDisabled"` + RouteAllTraffic *bool `json:"routeAllTraffic"` +} + +type jsonWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + } else { + + r.NetworkAttachment = res.NetworkAttachment + + r.PublicIPAddressDisabled = res.PublicIPAddressDisabled + + r.RouteAllTraffic = res.RouteAllTraffic + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect = &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{empty: true} + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolWorkerConfig struct { + empty bool `json:"-"` + MachineType *string `json:"machineType"` + DiskSizeGb *int64 `json:"diskSizeGb"` + EnableNestedVirtualization *bool `json:"enableNestedVirtualization"` + NoExternalIP *bool `json:"noExternalIP"` +} + +type jsonWorkerPoolWorkerConfig WorkerPoolWorkerConfig + +func (r *WorkerPoolWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolWorkerConfig + } else { + + r.MachineType = res.MachineType + + r.DiskSizeGb = res.DiskSizeGb + + r.EnableNestedVirtualization = res.EnableNestedVirtualization + + r.NoExternalIP = res.NoExternalIP + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolWorkerConfig *WorkerPoolWorkerConfig = &WorkerPoolWorkerConfig{empty: true} + +func (r *WorkerPoolWorkerConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolNetworkConfig struct { + empty bool `json:"-"` + PeeredNetwork *string `json:"peeredNetwork"` + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange"` +} + +type jsonWorkerPoolNetworkConfig WorkerPoolNetworkConfig + +func (r *WorkerPoolNetworkConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolNetworkConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolNetworkConfig + } else { + + r.PeeredNetwork = res.PeeredNetwork + + r.PeeredNetworkIPRange = res.PeeredNetworkIPRange + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolNetworkConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolNetworkConfig *WorkerPoolNetworkConfig = &WorkerPoolNetworkConfig{empty: true} + +func (r *WorkerPoolNetworkConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolNetworkConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolNetworkConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivateServiceConnect struct { + empty bool `json:"-"` + NetworkAttachment *string `json:"networkAttachment"` + RouteAllTraffic *bool `json:"routeAllTraffic"` +} + +type jsonWorkerPoolPrivateServiceConnect WorkerPoolPrivateServiceConnect + +func (r *WorkerPoolPrivateServiceConnect) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivateServiceConnect + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivateServiceConnect + } else { + + r.NetworkAttachment = res.NetworkAttachment + + r.RouteAllTraffic = res.RouteAllTraffic + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivateServiceConnect is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivateServiceConnect *WorkerPoolPrivateServiceConnect = &WorkerPoolPrivateServiceConnect{empty: true} + +func (r *WorkerPoolPrivateServiceConnect) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivateServiceConnect) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivateServiceConnect) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *WorkerPool) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "cloud_build", + Type: "WorkerPool", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "cloudbuild", +{{- end }} + } +} + +func (r *WorkerPool) ID() (string, error) { + if err := extractWorkerPoolFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "delete_time": dcl.ValueOrEmptyString(nr.DeleteTime), + "state": dcl.ValueOrEmptyString(nr.State), + "private_pool_v1_config": dcl.ValueOrEmptyString(nr.PrivatePoolV1Config), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "worker_config": dcl.ValueOrEmptyString(nr.WorkerConfig), + "network_config": dcl.ValueOrEmptyString(nr.NetworkConfig), + "private_service_connect": dcl.ValueOrEmptyString(nr.PrivateServiceConnect), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" 
}}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const WorkerPoolMaxPage = -1 + +type WorkerPoolList struct { + Items []*WorkerPool + + nextToken string + + pageSize int32 + + resource *WorkerPool +} + +func (l *WorkerPoolList) HasNext() bool { + return l.nextToken != "" +} + +func (l *WorkerPoolList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listWorkerPool(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListWorkerPool(ctx context.Context, project, location string) (*WorkerPoolList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListWorkerPoolWithMaxResults(ctx, project, location, WorkerPoolMaxPage) + +} + +func (c *Client) ListWorkerPoolWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*WorkerPoolList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &WorkerPool{ + Project: &project, + Location: &location, + } + items, token, err := c.listWorkerPool(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &WorkerPoolList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetWorkerPool(ctx context.Context, r *WorkerPool) (*WorkerPool, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. 
+ // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractWorkerPoolFields(r) + + b, err := c.getWorkerPoolRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalWorkerPool(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeWorkerPoolNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractWorkerPoolFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteWorkerPool(ctx context.Context, r *WorkerPool) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("WorkerPool resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting WorkerPool...") + deleteOp := deleteWorkerPoolOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllWorkerPool deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllWorkerPool(ctx context.Context, project, location string, filter func(*WorkerPool) bool) error { + listObj, err := c.ListWorkerPool(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllWorkerPool(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllWorkerPool(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyWorkerPool(ctx context.Context, rawDesired *WorkerPool, opts ...dcl.ApplyOption) (*WorkerPool, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *WorkerPool + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyWorkerPoolHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyWorkerPoolHelper(c *Client, ctx context.Context, rawDesired *WorkerPool, opts ...dcl.ApplyOption) (*WorkerPool, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyWorkerPool...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractWorkerPoolFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.workerPoolDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToWorkerPoolDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []workerPoolApiOperation + if create { + ops = append(ops, &createWorkerPoolOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyWorkerPoolDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyWorkerPoolDiff(c *Client, ctx context.Context, desired *WorkerPool, rawDesired *WorkerPool, ops []workerPoolApiOperation, opts ...dcl.ApplyOption) (*WorkerPool, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetWorkerPool(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createWorkerPoolOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapWorkerPool(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeWorkerPoolNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeWorkerPoolNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeWorkerPoolDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractWorkerPoolFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractWorkerPoolFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffWorkerPool(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/worker_pool_internal.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/worker_pool_internal.go.tmpl new file mode 100644 index 000000000000..e76775314eef --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/worker_pool_internal.go.tmpl @@ -0,0 +1,3509 @@ +package cloudbuild + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *WorkerPool) validate() error { + + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"NetworkConfig", "PrivatePoolV1Config"}, r.NetworkConfig, r.PrivatePoolV1Config); err != nil { + return err + } + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"WorkerConfig", "PrivatePoolV1Config"}, r.WorkerConfig, r.PrivatePoolV1Config); err != nil { + return err + } + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"PrivateServiceConnect", "PrivatePoolV1Config"}, r.PrivateServiceConnect, r.PrivatePoolV1Config); err != nil { + return err + } + if err := 
dcl.ValidateAtMostOneOfFieldsSet([]string{"PrivateServiceConnect", "NetworkConfig"}, r.PrivateServiceConnect, r.NetworkConfig); err != nil { + return err + } + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.PrivatePoolV1Config) { + if err := r.PrivatePoolV1Config.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.NetworkConfig) { + if err := r.NetworkConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PrivateServiceConnect) { + if err := r.PrivateServiceConnect.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkerPoolPrivatePoolV1Config) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"NetworkConfig", "PrivateServiceConnect"}, r.NetworkConfig, r.PrivateServiceConnect); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.NetworkConfig) { + if err := r.NetworkConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PrivateServiceConnect) { + if err := r.PrivateServiceConnect.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) validate() error { + return nil +} +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) validate() error { + return nil +} +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) validate() error { + return nil +} +func (r *WorkerPoolWorkerConfig) validate() error { + return nil +} +func (r *WorkerPoolNetworkConfig) validate() error { + if err := dcl.Required(r, 
"peeredNetwork"); err != nil { + return err + } + return nil +} +func (r *WorkerPoolPrivateServiceConnect) validate() error { + if err := dcl.Required(r, "networkAttachment"); err != nil { + return err + } + return nil +} +func (r *WorkerPool) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://cloudbuild.googleapis.com/v1/", params) +} + +func (r *WorkerPool) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *WorkerPool) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkerPool) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools?workerPoolId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkerPool) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return 
dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// workerPoolApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type workerPoolApiOperation interface { + do(context.Context, *WorkerPool, *Client) error +} + +// newUpdateWorkerPoolUpdateWorkerPoolRequest creates a request for an +// WorkerPool resource's UpdateWorkerPool update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateWorkerPoolUpdateWorkerPoolRequest(ctx context.Context, f *WorkerPool, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v, err := expandWorkerPoolPrivatePoolV1Config(c, f.PrivatePoolV1Config, res); err != nil { + return nil, fmt.Errorf("error expanding PrivatePoolV1Config into privatePoolV1Config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["privatePoolV1Config"] = v + } + if v, err := expandWorkerPoolWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["workerConfig"] = v + } + b, err := c.getWorkerPoolRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateWorkerPoolUpdateWorkerPoolRequest converts the update into +// the final JSON request body. 
+func marshalUpdateWorkerPoolUpdateWorkerPoolRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateWorkerPoolUpdateWorkerPoolOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateWorkerPoolUpdateWorkerPoolOperation) do(ctx context.Context, r *WorkerPool, c *Client) error { + _, err := c.GetWorkerPool(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateWorkerPool") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateWorkerPoolUpdateWorkerPoolRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateWorkerPoolUpdateWorkerPoolRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listWorkerPoolRaw(ctx context.Context, r *WorkerPool, pageToken string, pageSize int32) ([]byte, error) { + u, err := 
r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != WorkerPoolMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listWorkerPoolOperation struct { + WorkerPools []map[string]interface{} `json:"workerPools"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listWorkerPool(ctx context.Context, r *WorkerPool, pageToken string, pageSize int32) ([]*WorkerPool, string, error) { + b, err := c.listWorkerPoolRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listWorkerPoolOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*WorkerPool + for _, v := range m.WorkerPools { + res, err := unmarshalMapWorkerPool(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllWorkerPool(ctx context.Context, f func(*WorkerPool) bool, resources []*WorkerPool) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteWorkerPool(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteWorkerPoolOperation struct{} + +func (op *deleteWorkerPoolOperation) do(ctx context.Context, r *WorkerPool, c *Client) error { + r, err := c.GetWorkerPool(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "WorkerPool not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetWorkerPool checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetWorkerPool(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createWorkerPoolOperation struct { + response map[string]interface{} +} + +func (op *createWorkerPoolOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createWorkerPoolOperation) do(ctx context.Context, r *WorkerPool, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetWorkerPool(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getWorkerPoolRaw(ctx context.Context, r *WorkerPool) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) workerPoolDiffsForRawDesired(ctx context.Context, rawDesired *WorkerPool, opts 
...dcl.ApplyOption) (initial, desired *WorkerPool, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *WorkerPool + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*WorkerPool); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected WorkerPool, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetWorkerPool(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a WorkerPool resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve WorkerPool resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that WorkerPool resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeWorkerPoolDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for WorkerPool: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for WorkerPool: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractWorkerPoolFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeWorkerPoolInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for WorkerPool: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. 
+ desired, err = canonicalizeWorkerPoolDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for WorkerPool: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffWorkerPool(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeWorkerPoolInitialState(rawInitial, rawDesired *WorkerPool) (*WorkerPool, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + + if !dcl.IsZeroValue(rawInitial.NetworkConfig) { + // Check if anything else is set. + if dcl.AnySet() { + rawInitial.NetworkConfig = EmptyWorkerPoolNetworkConfig + } + } + + if !dcl.IsZeroValue(rawInitial.PrivatePoolV1Config) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.NetworkConfig) { + rawInitial.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config + } + } + + if !dcl.IsZeroValue(rawInitial.WorkerConfig) { + // Check if anything else is set. + if dcl.AnySet() { + rawInitial.WorkerConfig = EmptyWorkerPoolWorkerConfig + } + } + + if !dcl.IsZeroValue(rawInitial.PrivatePoolV1Config) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.WorkerConfig) { + rawInitial.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config + } + } + + if !dcl.IsZeroValue(rawInitial.PrivateServiceConnect) { + // Check if anything else is set. + if dcl.AnySet() { + rawInitial.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect + } + } + + if !dcl.IsZeroValue(rawInitial.PrivatePoolV1Config) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.PrivateServiceConnect) { + rawInitial.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config + } + } + + if !dcl.IsZeroValue(rawInitial.PrivateServiceConnect) { + // Check if anything else is set. 
+		if dcl.AnySet(rawInitial.NetworkConfig) {
+			rawInitial.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect
+		}
+	}
+
+	if !dcl.IsZeroValue(rawInitial.NetworkConfig) {
+		// Check if anything else is set.
+		if dcl.AnySet(rawInitial.PrivateServiceConnect) {
+			rawInitial.NetworkConfig = EmptyWorkerPoolNetworkConfig
+		}
+	}
+
+	return rawInitial, nil
+}
+
+/*
+* Canonicalizers
+*
+* These are responsible for converting either a user-specified config or a
+* GCP API response to a standard format that can be used for difference checking.
+* */
+
+// canonicalizeWorkerPoolDesiredState merges the user-specified desired state with the
+// observed initial state: unset desired fields inherit the initial values, equivalent
+// spellings collapse to the initial form, and mutually exclusive (one-of) fields are
+// cleared so only one member of each group survives for diffing.
+func canonicalizeWorkerPoolDesiredState(rawDesired, rawInitial *WorkerPool, opts ...dcl.ApplyOption) (*WorkerPool, error) {
+
+	if rawInitial == nil {
+		// Since the initial state is empty, the desired state is all we have.
+		// We canonicalize the remaining nested objects with nil to pick up defaults.
+		rawDesired.PrivatePoolV1Config = canonicalizeWorkerPoolPrivatePoolV1Config(rawDesired.PrivatePoolV1Config, nil, opts...)
+		rawDesired.WorkerConfig = canonicalizeWorkerPoolWorkerConfig(rawDesired.WorkerConfig, nil, opts...)
+		rawDesired.NetworkConfig = canonicalizeWorkerPoolNetworkConfig(rawDesired.NetworkConfig, nil, opts...)
+		rawDesired.PrivateServiceConnect = canonicalizeWorkerPoolPrivateServiceConnect(rawDesired.PrivateServiceConnect, nil, opts...)
+
+		return rawDesired, nil
+	}
+	canonicalDesired := &WorkerPool{}
+	// Name may be returned by the API as a (partial) self link; treat the two forms as equal.
+	if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) {
+		canonicalDesired.Name = rawInitial.Name
+	} else {
+		canonicalDesired.Name = rawDesired.Name
+	}
+	if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) {
+		canonicalDesired.DisplayName = rawInitial.DisplayName
+	} else {
+		canonicalDesired.DisplayName = rawDesired.DisplayName
+	}
+	if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		canonicalDesired.Annotations = rawInitial.Annotations
+	} else {
+		canonicalDesired.Annotations = rawDesired.Annotations
+	}
+	canonicalDesired.PrivatePoolV1Config = canonicalizeWorkerPoolPrivatePoolV1Config(rawDesired.PrivatePoolV1Config, rawInitial.PrivatePoolV1Config, opts...)
+	canonicalDesired.WorkerConfig = canonicalizeWorkerPoolWorkerConfig(rawDesired.WorkerConfig, rawInitial.WorkerConfig, opts...)
+	canonicalDesired.NetworkConfig = canonicalizeWorkerPoolNetworkConfig(rawDesired.NetworkConfig, rawInitial.NetworkConfig, opts...)
+	canonicalDesired.PrivateServiceConnect = canonicalizeWorkerPoolPrivateServiceConnect(rawDesired.PrivateServiceConnect, rawInitial.PrivateServiceConnect, opts...)
+	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
+		canonicalDesired.Project = rawInitial.Project
+	} else {
+		canonicalDesired.Project = rawDesired.Project
+	}
+	if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) {
+		canonicalDesired.Location = rawInitial.Location
+	} else {
+		canonicalDesired.Location = rawDesired.Location
+	}
+
+	// One-of enforcement: if a conflicting member of the group is set, blank this one
+	// out with the sentinel Empty* value.
+	if canonicalDesired.NetworkConfig != nil {
+		// Check if anything else is set.
+		// NOTE(review): dcl.AnySet() with no arguments always returns false, so this
+		// branch is dead code — presumably a generator artifact; confirm against the
+		// DCL generator output for this one-of group.
+		if dcl.AnySet() {
+			canonicalDesired.NetworkConfig = EmptyWorkerPoolNetworkConfig
+		}
+	}
+
+	if canonicalDesired.PrivatePoolV1Config != nil {
+		// Check if anything else is set.
+		if dcl.AnySet(rawDesired.NetworkConfig) {
+			canonicalDesired.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config
+		}
+	}
+
+	if canonicalDesired.WorkerConfig != nil {
+		// Check if anything else is set.
+		// NOTE(review): dcl.AnySet() with no arguments is always false (dead branch).
+		if dcl.AnySet() {
+			canonicalDesired.WorkerConfig = EmptyWorkerPoolWorkerConfig
+		}
+	}
+
+	if canonicalDesired.PrivatePoolV1Config != nil {
+		// Check if anything else is set.
+		if dcl.AnySet(rawDesired.WorkerConfig) {
+			canonicalDesired.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config
+		}
+	}
+
+	if canonicalDesired.PrivateServiceConnect != nil {
+		// Check if anything else is set.
+		// NOTE(review): dcl.AnySet() with no arguments is always false (dead branch).
+		if dcl.AnySet() {
+			canonicalDesired.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect
+		}
+	}
+
+	if canonicalDesired.PrivatePoolV1Config != nil {
+		// Check if anything else is set.
+		if dcl.AnySet(rawDesired.PrivateServiceConnect) {
+			canonicalDesired.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config
+		}
+	}
+
+	if canonicalDesired.PrivateServiceConnect != nil {
+		// Check if anything else is set.
+		if dcl.AnySet(rawDesired.NetworkConfig) {
+			canonicalDesired.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect
+		}
+	}
+
+	if canonicalDesired.NetworkConfig != nil {
+		// Check if anything else is set.
+		if dcl.AnySet(rawDesired.PrivateServiceConnect) {
+			canonicalDesired.NetworkConfig = EmptyWorkerPoolNetworkConfig
+		}
+	}
+
+	return canonicalDesired, nil
+}
+
+// canonicalizeWorkerPoolNewState reconciles the state returned by the API (rawNew)
+// with the desired state, preferring the desired spelling wherever the two values
+// are equivalent, so spurious diffs are not reported.
+func canonicalizeWorkerPoolNewState(c *Client, rawNew, rawDesired *WorkerPool) (*WorkerPool, error) {
+
+	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
+		rawNew.Name = rawDesired.Name
+	} else {
+		if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) {
+			rawNew.Name = rawDesired.Name
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) {
+		rawNew.DisplayName = rawDesired.DisplayName
+	} else {
+		if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) {
+			rawNew.DisplayName = rawDesired.DisplayName
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) {
+		rawNew.Uid = rawDesired.Uid
+	} else {
+		if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) {
+			rawNew.Uid = rawDesired.Uid
+		}
+	}
+
+	// For Annotations and the timestamp/state fields below there is no canonical
+	// desired form: the server value is kept unless both sides are empty.
+	if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) {
+		rawNew.Annotations = rawDesired.Annotations
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
+		rawNew.CreateTime = rawDesired.CreateTime
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) {
+		rawNew.UpdateTime = rawDesired.UpdateTime
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) {
+		rawNew.DeleteTime = rawDesired.DeleteTime
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) {
+		rawNew.State = rawDesired.State
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.PrivatePoolV1Config) && dcl.IsEmptyValueIndirect(rawDesired.PrivatePoolV1Config) {
+		rawNew.PrivatePoolV1Config = rawDesired.PrivatePoolV1Config
+	} else {
+		rawNew.PrivatePoolV1Config = canonicalizeNewWorkerPoolPrivatePoolV1Config(c, rawDesired.PrivatePoolV1Config, rawNew.PrivatePoolV1Config)
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) {
+		rawNew.Etag = rawDesired.Etag
+	} else {
+		if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) {
+			rawNew.Etag = rawDesired.Etag
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.WorkerConfig) && dcl.IsEmptyValueIndirect(rawDesired.WorkerConfig) {
+		rawNew.WorkerConfig = rawDesired.WorkerConfig
+	} else {
+		rawNew.WorkerConfig = canonicalizeNewWorkerPoolWorkerConfig(c, rawDesired.WorkerConfig, rawNew.WorkerConfig)
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.NetworkConfig) && dcl.IsEmptyValueIndirect(rawDesired.NetworkConfig) {
+		rawNew.NetworkConfig = rawDesired.NetworkConfig
+	} else {
+		rawNew.NetworkConfig = canonicalizeNewWorkerPoolNetworkConfig(c, rawDesired.NetworkConfig, rawNew.NetworkConfig)
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.PrivateServiceConnect) && dcl.IsEmptyValueIndirect(rawDesired.PrivateServiceConnect) {
+		rawNew.PrivateServiceConnect = rawDesired.PrivateServiceConnect
+	} else {
+		rawNew.PrivateServiceConnect = canonicalizeNewWorkerPoolPrivateServiceConnect(c, rawDesired.PrivateServiceConnect, rawNew.PrivateServiceConnect)
+	}
+
+	// Project and Location are identifying fields supplied by the caller, not the API.
+	rawNew.Project = rawDesired.Project
+
+	rawNew.Location = rawDesired.Location
+
+	return rawNew, nil
+}
+
+// canonicalizeWorkerPoolPrivatePoolV1Config canonicalizes the desired
+// privatePoolV1Config block against the initial state, clearing one-of conflicts.
+func canonicalizeWorkerPoolPrivatePoolV1Config(des, initial *WorkerPoolPrivatePoolV1Config, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1Config {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if des.NetworkConfig != nil || (initial != nil && initial.NetworkConfig != nil) {
+		// Check if anything else is set.
+		// NOTE(review): dcl.AnySet() with no arguments always returns false, so this
+		// branch is dead code — presumably a generator artifact; confirm against the
+		// DCL generator output for this one-of group.
+		if dcl.AnySet() {
+			des.NetworkConfig = nil
+			if initial != nil {
+				initial.NetworkConfig = nil
+			}
+		}
+	}
+
+	if des.PrivateServiceConnect != nil || (initial != nil && initial.PrivateServiceConnect != nil) {
+		// Check if anything else is set.
+		// NOTE(review): dcl.AnySet() with no arguments is always false (dead branch).
+		if dcl.AnySet() {
+			des.PrivateServiceConnect = nil
+			if initial != nil {
+				initial.PrivateServiceConnect = nil
+			}
+		}
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkerPoolPrivatePoolV1Config{}
+
+	cDes.WorkerConfig = canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(des.WorkerConfig, initial.WorkerConfig, opts...)
+	cDes.NetworkConfig = canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(des.NetworkConfig, initial.NetworkConfig, opts...)
+	cDes.PrivateServiceConnect = canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(des.PrivateServiceConnect, initial.PrivateServiceConnect, opts...)
+
+	return cDes
+}
+
+// canonicalizeWorkerPoolPrivatePoolV1ConfigSlice canonicalizes element-wise when the
+// lengths match; otherwise each desired element is canonicalized against nil.
+func canonicalizeWorkerPoolPrivatePoolV1ConfigSlice(des, initial []WorkerPoolPrivatePoolV1Config, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1Config {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkerPoolPrivatePoolV1Config, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkerPoolPrivatePoolV1Config(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkerPoolPrivatePoolV1Config, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkerPoolPrivatePoolV1Config(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkerPoolPrivatePoolV1Config reconciles the API-returned value (nw)
+// with the desired value (des) after an apply.
+func canonicalizeNewWorkerPoolPrivatePoolV1Config(c *Client, des, nw *WorkerPoolPrivatePoolV1Config) *WorkerPoolPrivatePoolV1Config {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1Config while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	nw.WorkerConfig = canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, des.WorkerConfig, nw.WorkerConfig)
+	nw.NetworkConfig = canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, des.NetworkConfig, nw.NetworkConfig)
+	nw.PrivateServiceConnect = canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, des.PrivateServiceConnect, nw.PrivateServiceConnect)
+
+	return nw
+}
+
+// canonicalizeNewWorkerPoolPrivatePoolV1ConfigSet treats des/nw as sets: matching
+// elements are canonicalized pairwise, unmatched actual elements are kept.
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigSet(c *Client, des, nw []WorkerPoolPrivatePoolV1Config) []WorkerPoolPrivatePoolV1Config {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkerPoolPrivatePoolV1Config
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1Config(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1Config) []WorkerPoolPrivatePoolV1Config {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkerPoolPrivatePoolV1Config
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1Config(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig canonicalizes the desired
+// worker config of a private pool against the initial state.
+func canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(des, initial *WorkerPoolPrivatePoolV1ConfigWorkerConfig, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1ConfigWorkerConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkerPoolPrivatePoolV1ConfigWorkerConfig{}
+
+	if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) {
+		cDes.MachineType = initial.MachineType
+	} else {
+		cDes.MachineType = des.MachineType
+	}
+	if dcl.IsZeroValue(des.DiskSizeGb) || (dcl.IsEmptyValueIndirect(des.DiskSizeGb) && dcl.IsEmptyValueIndirect(initial.DiskSizeGb)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.DiskSizeGb = initial.DiskSizeGb
+	} else {
+		cDes.DiskSizeGb = des.DiskSizeGb
+	}
+	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, initial.EnableNestedVirtualization) || dcl.IsZeroValue(des.EnableNestedVirtualization) {
+		cDes.EnableNestedVirtualization = initial.EnableNestedVirtualization
+	} else {
+		cDes.EnableNestedVirtualization = des.EnableNestedVirtualization
+	}
+
+	return cDes
+}
+
+func canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(des, initial []WorkerPoolPrivatePoolV1ConfigWorkerConfig, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1ConfigWorkerConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkerPoolPrivatePoolV1ConfigWorkerConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkerPoolPrivatePoolV1ConfigWorkerConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig reconciles the
+// API-returned worker config with the desired one after an apply.
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c *Client, des, nw *WorkerPoolPrivatePoolV1ConfigWorkerConfig) *WorkerPoolPrivatePoolV1ConfigWorkerConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1ConfigWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.MachineType, nw.MachineType) {
+		nw.MachineType = des.MachineType
+	}
+	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, nw.EnableNestedVirtualization) {
+		nw.EnableNestedVirtualization = des.EnableNestedVirtualization
+	}
+
+	return nw
+}
+
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfigSet(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigWorkerConfig) []WorkerPoolPrivatePoolV1ConfigWorkerConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkerPoolPrivatePoolV1ConfigWorkerConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigWorkerConfig) []WorkerPoolPrivatePoolV1ConfigWorkerConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkerPoolPrivatePoolV1ConfigWorkerConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig canonicalizes the desired
+// network config of a private pool against the initial state.
+func canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(des, initial *WorkerPoolPrivatePoolV1ConfigNetworkConfig, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1ConfigNetworkConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkerPoolPrivatePoolV1ConfigNetworkConfig{}
+
+	if dcl.IsZeroValue(des.PeeredNetwork) || (dcl.IsEmptyValueIndirect(des.PeeredNetwork) && dcl.IsEmptyValueIndirect(initial.PeeredNetwork)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.PeeredNetwork = initial.PeeredNetwork
+	} else {
+		cDes.PeeredNetwork = des.PeeredNetwork
+	}
+	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, initial.PeeredNetworkIPRange) || dcl.IsZeroValue(des.PeeredNetworkIPRange) {
+		cDes.PeeredNetworkIPRange = initial.PeeredNetworkIPRange
+	} else {
+		cDes.PeeredNetworkIPRange = des.PeeredNetworkIPRange
+	}
+	if dcl.IsZeroValue(des.EgressOption) || (dcl.IsEmptyValueIndirect(des.EgressOption) && dcl.IsEmptyValueIndirect(initial.EgressOption)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.EgressOption = initial.EgressOption
+	} else {
+		cDes.EgressOption = des.EgressOption
+	}
+
+	return cDes
+}
+
+func canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(des, initial []WorkerPoolPrivatePoolV1ConfigNetworkConfig, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1ConfigNetworkConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig reconciles the
+// API-returned network config with the desired one after an apply.
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c *Client, des, nw *WorkerPoolPrivatePoolV1ConfigNetworkConfig) *WorkerPoolPrivatePoolV1ConfigNetworkConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1ConfigNetworkConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, nw.PeeredNetworkIPRange) {
+		nw.PeeredNetworkIPRange = des.PeeredNetworkIPRange
+	}
+
+	return nw
+}
+
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfigSet(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigNetworkConfig) []WorkerPoolPrivatePoolV1ConfigNetworkConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkerPoolPrivatePoolV1ConfigNetworkConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigNetworkConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigNetworkConfig) []WorkerPoolPrivatePoolV1ConfigNetworkConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkerPoolPrivatePoolV1ConfigNetworkConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect canonicalizes the
+// desired Private Service Connect config of a private pool against the initial state.
+func canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(des, initial *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{}
+
+	if dcl.IsZeroValue(des.NetworkAttachment) || (dcl.IsEmptyValueIndirect(des.NetworkAttachment) && dcl.IsEmptyValueIndirect(initial.NetworkAttachment)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.NetworkAttachment = initial.NetworkAttachment
+	} else {
+		cDes.NetworkAttachment = des.NetworkAttachment
+	}
+	if dcl.BoolCanonicalize(des.PublicIPAddressDisabled, initial.PublicIPAddressDisabled) || dcl.IsZeroValue(des.PublicIPAddressDisabled) {
+		cDes.PublicIPAddressDisabled = initial.PublicIPAddressDisabled
+	} else {
+		cDes.PublicIPAddressDisabled = des.PublicIPAddressDisabled
+	}
+	if dcl.BoolCanonicalize(des.RouteAllTraffic, initial.RouteAllTraffic) || dcl.IsZeroValue(des.RouteAllTraffic) {
+		cDes.RouteAllTraffic = initial.RouteAllTraffic
+	} else {
+		cDes.RouteAllTraffic = des.RouteAllTraffic
+	}
+
+	return cDes
+}
+
+func canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(des, initial []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect reconciles the
+// API-returned Private Service Connect config with the desired one after an apply.
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c *Client, des, nw *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.PublicIPAddressDisabled, nw.PublicIPAddressDisabled) {
+		nw.PublicIPAddressDisabled = des.PublicIPAddressDisabled
+	}
+	if dcl.BoolCanonicalize(des.RouteAllTraffic, nw.RouteAllTraffic) {
+		nw.RouteAllTraffic = des.RouteAllTraffic
+	}
+
+	return nw
+}
+
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSet(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkerPoolWorkerConfig canonicalizes the desired top-level (legacy)
+// worker config against the initial state.
+func canonicalizeWorkerPoolWorkerConfig(des, initial *WorkerPoolWorkerConfig, opts ...dcl.ApplyOption) *WorkerPoolWorkerConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkerPoolWorkerConfig{}
+
+	if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) {
+		cDes.MachineType = initial.MachineType
+	} else {
+		cDes.MachineType = des.MachineType
+	}
+	if dcl.IsZeroValue(des.DiskSizeGb) || (dcl.IsEmptyValueIndirect(des.DiskSizeGb) && dcl.IsEmptyValueIndirect(initial.DiskSizeGb)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.DiskSizeGb = initial.DiskSizeGb
+	} else {
+		cDes.DiskSizeGb = des.DiskSizeGb
+	}
+	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, initial.EnableNestedVirtualization) || dcl.IsZeroValue(des.EnableNestedVirtualization) {
+		cDes.EnableNestedVirtualization = initial.EnableNestedVirtualization
+	} else {
+		cDes.EnableNestedVirtualization = des.EnableNestedVirtualization
+	}
+	if dcl.BoolCanonicalize(des.NoExternalIP, initial.NoExternalIP) || dcl.IsZeroValue(des.NoExternalIP) {
+		cDes.NoExternalIP = initial.NoExternalIP
+	} else {
+		cDes.NoExternalIP = des.NoExternalIP
+	}
+
+	return cDes
+}
+
+func canonicalizeWorkerPoolWorkerConfigSlice(des, initial []WorkerPoolWorkerConfig, opts ...dcl.ApplyOption) []WorkerPoolWorkerConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkerPoolWorkerConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkerPoolWorkerConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkerPoolWorkerConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkerPoolWorkerConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkerPoolWorkerConfig reconciles the API-returned worker config
+// with the desired one after an apply.
+func canonicalizeNewWorkerPoolWorkerConfig(c *Client, des, nw *WorkerPoolWorkerConfig) *WorkerPoolWorkerConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.MachineType, nw.MachineType) {
+		nw.MachineType = des.MachineType
+	}
+	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, nw.EnableNestedVirtualization) {
+		nw.EnableNestedVirtualization = des.EnableNestedVirtualization
+	}
+	if dcl.BoolCanonicalize(des.NoExternalIP, nw.NoExternalIP) {
+		nw.NoExternalIP = des.NoExternalIP
+	}
+
+	return nw
+}
+
+func canonicalizeNewWorkerPoolWorkerConfigSet(c *Client, des, nw []WorkerPoolWorkerConfig) []WorkerPoolWorkerConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkerPoolWorkerConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkerPoolWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkerPoolWorkerConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewWorkerPoolWorkerConfigSlice(c *Client, des, nw []WorkerPoolWorkerConfig) []WorkerPoolWorkerConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkerPoolWorkerConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkerPoolWorkerConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkerPoolNetworkConfig canonicalizes the desired top-level (legacy)
+// network config against the initial state.
+func canonicalizeWorkerPoolNetworkConfig(des, initial *WorkerPoolNetworkConfig, opts ...dcl.ApplyOption) *WorkerPoolNetworkConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkerPoolNetworkConfig{}
+
+	if dcl.IsZeroValue(des.PeeredNetwork) || (dcl.IsEmptyValueIndirect(des.PeeredNetwork) && dcl.IsEmptyValueIndirect(initial.PeeredNetwork)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.PeeredNetwork = initial.PeeredNetwork
+	} else {
+		cDes.PeeredNetwork = des.PeeredNetwork
+	}
+	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, initial.PeeredNetworkIPRange) || dcl.IsZeroValue(des.PeeredNetworkIPRange) {
+		cDes.PeeredNetworkIPRange = initial.PeeredNetworkIPRange
+	} else {
+		cDes.PeeredNetworkIPRange = des.PeeredNetworkIPRange
+	}
+
+	return cDes
+}
+
+func canonicalizeWorkerPoolNetworkConfigSlice(des, initial []WorkerPoolNetworkConfig, opts ...dcl.ApplyOption) []WorkerPoolNetworkConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkerPoolNetworkConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkerPoolNetworkConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkerPoolNetworkConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkerPoolNetworkConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkerPoolNetworkConfig reconciles the API-returned network config
+// with the desired one after an apply.
+func canonicalizeNewWorkerPoolNetworkConfig(c *Client, des, nw *WorkerPoolNetworkConfig) *WorkerPoolNetworkConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolNetworkConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, nw.PeeredNetworkIPRange) {
+		nw.PeeredNetworkIPRange = des.PeeredNetworkIPRange
+	}
+
+	return nw
+}
+
+func canonicalizeNewWorkerPoolNetworkConfigSet(c *Client, des, nw []WorkerPoolNetworkConfig) []WorkerPoolNetworkConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkerPoolNetworkConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkerPoolNetworkConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkerPoolNetworkConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewWorkerPoolNetworkConfigSlice(c *Client, des, nw []WorkerPoolNetworkConfig) []WorkerPoolNetworkConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkerPoolNetworkConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkerPoolNetworkConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkerPoolPrivateServiceConnect canonicalizes the desired top-level
+// Private Service Connect config against the initial state.
+func canonicalizeWorkerPoolPrivateServiceConnect(des, initial *WorkerPoolPrivateServiceConnect, opts ...dcl.ApplyOption) *WorkerPoolPrivateServiceConnect {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkerPoolPrivateServiceConnect{}
+
+	if dcl.IsZeroValue(des.NetworkAttachment) || (dcl.IsEmptyValueIndirect(des.NetworkAttachment) && dcl.IsEmptyValueIndirect(initial.NetworkAttachment)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.NetworkAttachment = initial.NetworkAttachment
+	} else {
+		cDes.NetworkAttachment = des.NetworkAttachment
+	}
+	if dcl.BoolCanonicalize(des.RouteAllTraffic, initial.RouteAllTraffic) || dcl.IsZeroValue(des.RouteAllTraffic) {
+		cDes.RouteAllTraffic = initial.RouteAllTraffic
+	} else {
+		cDes.RouteAllTraffic = des.RouteAllTraffic
+	}
+
+	return cDes
+}
+
+func canonicalizeWorkerPoolPrivateServiceConnectSlice(des, initial []WorkerPoolPrivateServiceConnect, opts ...dcl.ApplyOption) []WorkerPoolPrivateServiceConnect {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkerPoolPrivateServiceConnect, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkerPoolPrivateServiceConnect(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkerPoolPrivateServiceConnect, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkerPoolPrivateServiceConnect(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkerPoolPrivateServiceConnect reconciles the API-returned
+// Private Service Connect config with the desired one after an apply.
+func canonicalizeNewWorkerPoolPrivateServiceConnect(c *Client, des, nw *WorkerPoolPrivateServiceConnect) *WorkerPoolPrivateServiceConnect {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivateServiceConnect while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.RouteAllTraffic, nw.RouteAllTraffic) {
+		nw.RouteAllTraffic = des.RouteAllTraffic
+	}
+
+	return nw
+}
+
+func canonicalizeNewWorkerPoolPrivateServiceConnectSet(c *Client, des, nw []WorkerPoolPrivateServiceConnect) []WorkerPoolPrivateServiceConnect {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkerPoolPrivateServiceConnect
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkerPoolPrivateServiceConnectNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkerPoolPrivateServiceConnect(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewWorkerPoolPrivateServiceConnectSlice(c *Client, des, nw []WorkerPoolPrivateServiceConnect) []WorkerPoolPrivateServiceConnect {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+ if len(des) != len(nw) { + return nw + } + + var items []WorkerPoolPrivateServiceConnect + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkerPoolPrivateServiceConnect(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffWorkerPool(c *Client, desired, actual *WorkerPool, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PrivatePoolV1Config, actual.PrivatePoolV1Config, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1Config, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivatePoolV1Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkerPoolWorkerConfigNewStyle, EmptyObject: EmptyWorkerPoolWorkerConfig, OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.NetworkConfig, actual.NetworkConfig, dcl.DiffInfo{ObjectFunction: compareWorkerPoolNetworkConfigNewStyle, EmptyObject: EmptyWorkerPoolNetworkConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrivateServiceConnect, actual.PrivateServiceConnect, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivateServiceConnectNewStyle, EmptyObject: EmptyWorkerPoolPrivateServiceConnect, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateServiceConnect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareWorkerPoolPrivatePoolV1ConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1Config) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1Config) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1Config or *WorkerPoolPrivatePoolV1Config", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1Config) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1Config) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1Config", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigWorkerConfigNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig, OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.NetworkConfig, actual.NetworkConfig, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigNetworkConfigNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrivateServiceConnect, actual.PrivateServiceConnect, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateServiceConnect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolPrivatePoolV1ConfigWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigWorkerConfig or *WorkerPoolPrivatePoolV1ConfigWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("MachineType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DiskSizeGb, actual.DiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("DiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableNestedVirtualization, actual.EnableNestedVirtualization, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("EnableNestedVirtualization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolPrivatePoolV1ConfigNetworkConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigNetworkConfig or *WorkerPoolPrivatePoolV1ConfigNetworkConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigNetworkConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PeeredNetwork, actual.PeeredNetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetwork")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PeeredNetworkIPRange, actual.PeeredNetworkIPRange, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetworkIpRange")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EgressOption, actual.EgressOption, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("EgressOption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect or *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NetworkAttachment, actual.NetworkAttachment, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkAttachment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PublicIPAddressDisabled, actual.PublicIPAddressDisabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicIpAddressDisabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RouteAllTraffic, actual.RouteAllTraffic, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RouteAllTraffic")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolWorkerConfig or *WorkerPoolWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("MachineType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskSizeGb, actual.DiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("DiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.EnableNestedVirtualization, actual.EnableNestedVirtualization, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("EnableNestedVirtualization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NoExternalIP, actual.NoExternalIP, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("NoExternalIp")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolNetworkConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolNetworkConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolNetworkConfig or *WorkerPoolNetworkConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolNetworkConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolNetworkConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PeeredNetwork, actual.PeeredNetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetwork")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PeeredNetworkIPRange, actual.PeeredNetworkIPRange, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetworkIpRange")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkerPoolPrivateServiceConnectNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivateServiceConnect) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivateServiceConnect or *WorkerPoolPrivateServiceConnect", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivateServiceConnect) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivateServiceConnect", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NetworkAttachment, actual.NetworkAttachment, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkAttachment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RouteAllTraffic, actual.RouteAllTraffic, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RouteAllTraffic")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *WorkerPool) urlNormalized() *WorkerPool { + normalized := dcl.Copy(*r).(WorkerPool) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *WorkerPool) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateWorkerPool" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the WorkerPool resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *WorkerPool) marshal(c *Client) ([]byte, error) { + m, err := expandWorkerPool(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling WorkerPool: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalWorkerPool decodes JSON responses into the WorkerPool resource schema. 
+func unmarshalWorkerPool(b []byte, c *Client, res *WorkerPool) (*WorkerPool, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapWorkerPool(m, c, res) +} + +func unmarshalMapWorkerPool(m map[string]interface{}, c *Client, res *WorkerPool) (*WorkerPool, error) { + + flattened := flattenWorkerPool(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandWorkerPool expands WorkerPool into a JSON request object. +func expandWorkerPool(c *Client, f *WorkerPool) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/workerPools/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := expandWorkerPoolPrivatePoolV1Config(c, f.PrivatePoolV1Config, res); err != nil { + return nil, fmt.Errorf("error expanding PrivatePoolV1Config into privatePoolV1Config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["privatePoolV1Config"] = v + } + if v, err := expandWorkerPoolWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workerConfig"] = v + } + if v, err := expandWorkerPoolNetworkConfig(c, f.NetworkConfig, res); err != nil { + return nil, fmt.Errorf("error expanding NetworkConfig into networkConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["networkConfig"] = v + } + if v, err := 
expandWorkerPoolPrivateServiceConnect(c, f.PrivateServiceConnect, res); err != nil { + return nil, fmt.Errorf("error expanding PrivateServiceConnect into privateServiceConnect: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["privateServiceConnect"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenWorkerPool flattens WorkerPool from a JSON request object into the +// WorkerPool type. +func flattenWorkerPool(c *Client, i interface{}, res *WorkerPool) *WorkerPool { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &WorkerPool{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"]) + resultRes.State = flattenWorkerPoolStateEnum(m["state"]) + resultRes.PrivatePoolV1Config = flattenWorkerPoolPrivatePoolV1Config(c, m["privatePoolV1Config"], res) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.WorkerConfig = flattenWorkerPoolWorkerConfig(c, m["workerConfig"], res) + resultRes.NetworkConfig = flattenWorkerPoolNetworkConfig(c, m["networkConfig"], res) + resultRes.PrivateServiceConnect = flattenWorkerPoolPrivateServiceConnect(c, m["privateServiceConnect"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return 
resultRes +} + +// expandWorkerPoolPrivatePoolV1ConfigMap expands the contents of WorkerPoolPrivatePoolV1Config into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigMap(c *Client, f map[string]WorkerPoolPrivatePoolV1Config, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivatePoolV1Config(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivatePoolV1ConfigSlice expands the contents of WorkerPoolPrivatePoolV1Config into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigSlice(c *Client, f []WorkerPoolPrivatePoolV1Config, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivatePoolV1Config(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigMap flattens the contents of WorkerPoolPrivatePoolV1Config from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1Config { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1Config{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1Config{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1Config) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1Config(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigSlice flattens the contents of WorkerPoolPrivatePoolV1Config from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1Config { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1Config{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1Config{} + } + + items := make([]WorkerPoolPrivatePoolV1Config, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1Config(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivatePoolV1Config expands an instance of WorkerPoolPrivatePoolV1Config into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1Config(c *Client, f *WorkerPoolPrivatePoolV1Config, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workerConfig"] = v + } + if v, err := expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, f.NetworkConfig, res); err != nil { + return nil, fmt.Errorf("error expanding NetworkConfig into networkConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["networkConfig"] = v + } + if v, err := expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, f.PrivateServiceConnect, res); err != nil { + return nil, fmt.Errorf("error expanding PrivateServiceConnect into privateServiceConnect: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["privateServiceConnect"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivatePoolV1Config flattens an instance of WorkerPoolPrivatePoolV1Config from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1Config(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1Config { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivatePoolV1Config{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivatePoolV1Config + } + r.WorkerConfig = flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, m["workerConfig"], res) + r.NetworkConfig = flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, m["networkConfig"], res) + r.PrivateServiceConnect = flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, m["privateServiceConnect"], res) + + return r +} + +// expandWorkerPoolPrivatePoolV1ConfigWorkerConfigMap expands the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigWorkerConfigMap(c *Client, f map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice expands the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig into a JSON +// request object. 
+func expandWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(c *Client, f []WorkerPoolPrivatePoolV1ConfigWorkerConfig, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigMap flattens the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigWorkerConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + } + + items := make([]WorkerPoolPrivatePoolV1ConfigWorkerConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivatePoolV1ConfigWorkerConfig expands an instance of WorkerPoolPrivatePoolV1ConfigWorkerConfig into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c *Client, f *WorkerPoolPrivatePoolV1ConfigWorkerConfig, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineType"] = v + } + if v := f.DiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["diskSizeGb"] = v + } + if v := f.EnableNestedVirtualization; !dcl.IsEmptyValueIndirect(v) { + m["enableNestedVirtualization"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig flattens an instance of WorkerPoolPrivatePoolV1ConfigWorkerConfig from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1ConfigWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig + } + r.MachineType = dcl.FlattenString(m["machineType"]) + r.DiskSizeGb = dcl.FlattenInteger(m["diskSizeGb"]) + r.EnableNestedVirtualization = dcl.FlattenBool(m["enableNestedVirtualization"]) + + return r +} + +// expandWorkerPoolPrivatePoolV1ConfigNetworkConfigMap expands the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigNetworkConfigMap(c *Client, f map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice expands the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(c *Client, f []WorkerPoolPrivatePoolV1ConfigNetworkConfig, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigMap flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigNetworkConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivatePoolV1ConfigNetworkConfig expands an instance of WorkerPoolPrivatePoolV1ConfigNetworkConfig into a JSON +// request object. 
+func expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c *Client, f *WorkerPoolPrivatePoolV1ConfigNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.PeeredNetwork; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetwork"] = v + } + if v := f.PeeredNetworkIPRange; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetworkIpRange"] = v + } + if v := f.EgressOption; !dcl.IsEmptyValueIndirect(v) { + m["egressOption"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig flattens an instance of WorkerPoolPrivatePoolV1ConfigNetworkConfig from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1ConfigNetworkConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig + } + r.PeeredNetwork = dcl.FlattenString(m["peeredNetwork"]) + r.PeeredNetworkIPRange = dcl.FlattenString(m["peeredNetworkIpRange"]) + r.EgressOption = flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(m["egressOption"]) + + return r +} + +// expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap expands the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect into a JSON +// request object. 
+func expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap(c *Client, f map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice expands the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(c *Client, f []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap flattens the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + items := make([]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect expands an instance of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect into a JSON +// request object. 
+func expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c *Client, f *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NetworkAttachment; !dcl.IsEmptyValueIndirect(v) { + m["networkAttachment"] = v + } + if v := f.PublicIPAddressDisabled; !dcl.IsEmptyValueIndirect(v) { + m["publicIpAddressDisabled"] = v + } + if v := f.RouteAllTraffic; !dcl.IsEmptyValueIndirect(v) { + m["routeAllTraffic"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect flattens an instance of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + } + r.NetworkAttachment = dcl.FlattenString(m["networkAttachment"]) + r.PublicIPAddressDisabled = dcl.FlattenBool(m["publicIpAddressDisabled"]) + r.RouteAllTraffic = dcl.FlattenBool(m["routeAllTraffic"]) + + return r +} + +// expandWorkerPoolWorkerConfigMap expands the contents of WorkerPoolWorkerConfig into a JSON +// request object. 
+func expandWorkerPoolWorkerConfigMap(c *Client, f map[string]WorkerPoolWorkerConfig, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolWorkerConfigSlice expands the contents of WorkerPoolWorkerConfig into a JSON +// request object. +func expandWorkerPoolWorkerConfigSlice(c *Client, f []WorkerPoolWorkerConfig, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolWorkerConfigMap flattens the contents of WorkerPoolWorkerConfig from a JSON +// response object. +func flattenWorkerPoolWorkerConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolWorkerConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolWorkerConfig{} + } + + if len(a) == 0 { + return map[string]WorkerPoolWorkerConfig{} + } + + items := make(map[string]WorkerPoolWorkerConfig) + for k, item := range a { + items[k] = *flattenWorkerPoolWorkerConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolWorkerConfigSlice flattens the contents of WorkerPoolWorkerConfig from a JSON +// response object. 
+func flattenWorkerPoolWorkerConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolWorkerConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolWorkerConfig{} + } + + if len(a) == 0 { + return []WorkerPoolWorkerConfig{} + } + + items := make([]WorkerPoolWorkerConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolWorkerConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolWorkerConfig expands an instance of WorkerPoolWorkerConfig into a JSON +// request object. +func expandWorkerPoolWorkerConfig(c *Client, f *WorkerPoolWorkerConfig, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineType"] = v + } + if v := f.DiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["diskSizeGb"] = v + } + if v := f.EnableNestedVirtualization; !dcl.IsEmptyValueIndirect(v) { + m["enableNestedVirtualization"] = v + } + if v := f.NoExternalIP; !dcl.IsEmptyValueIndirect(v) { + m["noExternalIp"] = v + } + + return m, nil +} + +// flattenWorkerPoolWorkerConfig flattens an instance of WorkerPoolWorkerConfig from a JSON +// response object. +func flattenWorkerPoolWorkerConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolWorkerConfig + } + r.MachineType = dcl.FlattenString(m["machineType"]) + r.DiskSizeGb = dcl.FlattenInteger(m["diskSizeGb"]) + r.EnableNestedVirtualization = dcl.FlattenBool(m["enableNestedVirtualization"]) + r.NoExternalIP = dcl.FlattenBool(m["noExternalIp"]) + + return r +} + +// expandWorkerPoolNetworkConfigMap expands the contents of WorkerPoolNetworkConfig into a JSON +// request object. 
+func expandWorkerPoolNetworkConfigMap(c *Client, f map[string]WorkerPoolNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolNetworkConfigSlice expands the contents of WorkerPoolNetworkConfig into a JSON +// request object. +func expandWorkerPoolNetworkConfigSlice(c *Client, f []WorkerPoolNetworkConfig, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolNetworkConfigMap flattens the contents of WorkerPoolNetworkConfig from a JSON +// response object. +func flattenWorkerPoolNetworkConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolNetworkConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolNetworkConfig{} + } + + if len(a) == 0 { + return map[string]WorkerPoolNetworkConfig{} + } + + items := make(map[string]WorkerPoolNetworkConfig) + for k, item := range a { + items[k] = *flattenWorkerPoolNetworkConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolNetworkConfigSlice flattens the contents of WorkerPoolNetworkConfig from a JSON +// response object. 
+func flattenWorkerPoolNetworkConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolNetworkConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolNetworkConfig{} + } + + if len(a) == 0 { + return []WorkerPoolNetworkConfig{} + } + + items := make([]WorkerPoolNetworkConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolNetworkConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolNetworkConfig expands an instance of WorkerPoolNetworkConfig into a JSON +// request object. +func expandWorkerPoolNetworkConfig(c *Client, f *WorkerPoolNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.PeeredNetwork; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetwork"] = v + } + if v := f.PeeredNetworkIPRange; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetworkIpRange"] = v + } + + return m, nil +} + +// flattenWorkerPoolNetworkConfig flattens an instance of WorkerPoolNetworkConfig from a JSON +// response object. +func flattenWorkerPoolNetworkConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolNetworkConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolNetworkConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolNetworkConfig + } + r.PeeredNetwork = dcl.FlattenString(m["peeredNetwork"]) + r.PeeredNetworkIPRange = dcl.FlattenString(m["peeredNetworkIpRange"]) + + return r +} + +// expandWorkerPoolPrivateServiceConnectMap expands the contents of WorkerPoolPrivateServiceConnect into a JSON +// request object. 
+func expandWorkerPoolPrivateServiceConnectMap(c *Client, f map[string]WorkerPoolPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivateServiceConnectSlice expands the contents of WorkerPoolPrivateServiceConnect into a JSON +// request object. +func expandWorkerPoolPrivateServiceConnectSlice(c *Client, f []WorkerPoolPrivateServiceConnect, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivateServiceConnectMap flattens the contents of WorkerPoolPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivateServiceConnectMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivateServiceConnect { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivateServiceConnect{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivateServiceConnect{} + } + + items := make(map[string]WorkerPoolPrivateServiceConnect) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivateServiceConnect(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivateServiceConnectSlice flattens the contents of WorkerPoolPrivateServiceConnect from a JSON +// response object. 
+func flattenWorkerPoolPrivateServiceConnectSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivateServiceConnect { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivateServiceConnect{} + } + + if len(a) == 0 { + return []WorkerPoolPrivateServiceConnect{} + } + + items := make([]WorkerPoolPrivateServiceConnect, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivateServiceConnect(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivateServiceConnect expands an instance of WorkerPoolPrivateServiceConnect into a JSON +// request object. +func expandWorkerPoolPrivateServiceConnect(c *Client, f *WorkerPoolPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NetworkAttachment; !dcl.IsEmptyValueIndirect(v) { + m["networkAttachment"] = v + } + if v := f.RouteAllTraffic; !dcl.IsEmptyValueIndirect(v) { + m["routeAllTraffic"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivateServiceConnect flattens an instance of WorkerPoolPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivateServiceConnect(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivateServiceConnect { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivateServiceConnect{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivateServiceConnect + } + r.NetworkAttachment = dcl.FlattenString(m["networkAttachment"]) + r.RouteAllTraffic = dcl.FlattenBool(m["routeAllTraffic"]) + + return r +} + +// flattenWorkerPoolStateEnumMap flattens the contents of WorkerPoolStateEnum from a JSON +// response object. 
+func flattenWorkerPoolStateEnumMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolStateEnum{} + } + + if len(a) == 0 { + return map[string]WorkerPoolStateEnum{} + } + + items := make(map[string]WorkerPoolStateEnum) + for k, item := range a { + items[k] = *flattenWorkerPoolStateEnum(item.(interface{})) + } + + return items +} + +// flattenWorkerPoolStateEnumSlice flattens the contents of WorkerPoolStateEnum from a JSON +// response object. +func flattenWorkerPoolStateEnumSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolStateEnum{} + } + + if len(a) == 0 { + return []WorkerPoolStateEnum{} + } + + items := make([]WorkerPoolStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolStateEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkerPoolStateEnum asserts that an interface is a string, and returns a +// pointer to a *WorkerPoolStateEnum with the same value as that string. +func flattenWorkerPoolStateEnum(i interface{}) *WorkerPoolStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkerPoolStateEnumRef(s) +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumMap flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(item.(interface{})) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum asserts that an interface is a string, and returns a +// pointer to a *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum with the same value as that string. 
+func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(i interface{}) *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *WorkerPool) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalWorkerPool(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type workerPoolDiff struct { + // The diff should include one or the other of 
RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp workerPoolApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToWorkerPoolDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]workerPoolDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []workerPoolDiff + // For each operation name, create a workerPoolDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := workerPoolDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToWorkerPoolApiOperation(opName, fieldDiffs, opts...) 
+ if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToWorkerPoolApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (workerPoolApiOperation, error) { + switch opName { + + case "updateWorkerPoolUpdateWorkerPoolOperation": + return &updateWorkerPoolUpdateWorkerPoolOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractWorkerPoolFields(r *WorkerPool) error { + if dcl.IsEmptyValueIndirect(r.PrivatePoolV1Config) { + r.PrivatePoolV1Config = betaToGaPrivatePool(r, r.PrivatePoolV1Config) + } + vPrivatePoolV1Config := r.PrivatePoolV1Config + if vPrivatePoolV1Config == nil { + // note: explicitly not the empty object. + vPrivatePoolV1Config = &WorkerPoolPrivatePoolV1Config{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigFields(r, vPrivatePoolV1Config); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivatePoolV1Config) { + r.PrivatePoolV1Config = vPrivatePoolV1Config + } + vWorkerConfig := r.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkerPoolWorkerConfig{} + } + if err := extractWorkerPoolWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + r.WorkerConfig = vWorkerConfig + } + vNetworkConfig := r.NetworkConfig + if vNetworkConfig == nil { + // note: explicitly not the empty object. + vNetworkConfig = &WorkerPoolNetworkConfig{} + } + if err := extractWorkerPoolNetworkConfigFields(r, vNetworkConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworkConfig) { + r.NetworkConfig = vNetworkConfig + } + vPrivateServiceConnect := r.PrivateServiceConnect + if vPrivateServiceConnect == nil { + // note: explicitly not the empty object. 
+ vPrivateServiceConnect = &WorkerPoolPrivateServiceConnect{} + } + if err := extractWorkerPoolPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) { + r.PrivateServiceConnect = vPrivateServiceConnect + } + return nil +} +func extractWorkerPoolPrivatePoolV1ConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1Config) error { + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vNetworkConfig := o.NetworkConfig + if vNetworkConfig == nil { + // note: explicitly not the empty object. + vNetworkConfig = &WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r, vNetworkConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworkConfig) { + o.NetworkConfig = vNetworkConfig + } + vPrivateServiceConnect := o.PrivateServiceConnect + if vPrivateServiceConnect == nil { + // note: explicitly not the empty object. 
+ vPrivateServiceConnect = &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) { + o.PrivateServiceConnect = vPrivateServiceConnect + } + return nil +} +func extractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigWorkerConfig) error { + return nil +} +func extractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigNetworkConfig) error { + return nil +} +func extractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) error { + return nil +} +func extractWorkerPoolWorkerConfigFields(r *WorkerPool, o *WorkerPoolWorkerConfig) error { + return nil +} +func extractWorkerPoolNetworkConfigFields(r *WorkerPool, o *WorkerPoolNetworkConfig) error { + return nil +} +func extractWorkerPoolPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivateServiceConnect) error { + return nil +} + +func postReadExtractWorkerPoolFields(r *WorkerPool) error { + + r.PrivatePoolV1Config = gaToBetaPrivatePool(r, r.PrivatePoolV1Config) + vPrivatePoolV1Config := r.PrivatePoolV1Config + if vPrivatePoolV1Config == nil { + // note: explicitly not the empty object. + vPrivatePoolV1Config = &WorkerPoolPrivatePoolV1Config{} + } + if err := postReadExtractWorkerPoolPrivatePoolV1ConfigFields(r, vPrivatePoolV1Config); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivatePoolV1Config) { + r.PrivatePoolV1Config = vPrivatePoolV1Config + } + vWorkerConfig := r.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. 
+ vWorkerConfig = &WorkerPoolWorkerConfig{} + } + if err := postReadExtractWorkerPoolWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + r.WorkerConfig = vWorkerConfig + } + vNetworkConfig := r.NetworkConfig + if vNetworkConfig == nil { + // note: explicitly not the empty object. + vNetworkConfig = &WorkerPoolNetworkConfig{} + } + if err := postReadExtractWorkerPoolNetworkConfigFields(r, vNetworkConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworkConfig) { + r.NetworkConfig = vNetworkConfig + } + vPrivateServiceConnect := r.PrivateServiceConnect + if vPrivateServiceConnect == nil { + // note: explicitly not the empty object. + vPrivateServiceConnect = &WorkerPoolPrivateServiceConnect{} + } + if err := postReadExtractWorkerPoolPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) { + r.PrivateServiceConnect = vPrivateServiceConnect + } + return nil +} +func postReadExtractWorkerPoolPrivatePoolV1ConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1Config) error { + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vNetworkConfig := o.NetworkConfig + if vNetworkConfig == nil { + // note: explicitly not the empty object. 
+ vNetworkConfig = &WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r, vNetworkConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworkConfig) { + o.NetworkConfig = vNetworkConfig + } + vPrivateServiceConnect := o.PrivateServiceConnect + if vPrivateServiceConnect == nil { + // note: explicitly not the empty object. + vPrivateServiceConnect = &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) { + o.PrivateServiceConnect = vPrivateServiceConnect + } + return nil +} +func postReadExtractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigWorkerConfig) error { + return nil +} +func postReadExtractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigNetworkConfig) error { + return nil +} +func postReadExtractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) error { + return nil +} +func postReadExtractWorkerPoolWorkerConfigFields(r *WorkerPool, o *WorkerPoolWorkerConfig) error { + return nil +} +func postReadExtractWorkerPoolNetworkConfigFields(r *WorkerPool, o *WorkerPoolNetworkConfig) error { + return nil +} +func postReadExtractWorkerPoolPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivateServiceConnect) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/client.go b/mmv1/third_party/terraform/services/clouddeploy/client.go new file mode 100644 index 000000000000..72b5cafe657f --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/client.go @@ -0,0 +1,18 @@ +package clouddeploy + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + 
+// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl new file mode 100644 index 000000000000..e7b98ee13ff5 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl @@ -0,0 +1,1653 @@ +package clouddeploy + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type DeliveryPipeline struct { + Name *string `json:"name"` + Uid *string `json:"uid"` + Description *string `json:"description"` + Annotations map[string]string `json:"annotations"` + Labels map[string]string `json:"labels"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + SerialPipeline *DeliveryPipelineSerialPipeline `json:"serialPipeline"` + Condition *DeliveryPipelineCondition `json:"condition"` + Etag *string `json:"etag"` + Project *string `json:"project"` + Location *string `json:"location"` + Suspended *bool `json:"suspended"` +} + +func (r *DeliveryPipeline) String() string { + return dcl.SprintResource(r) +} + +type DeliveryPipelineSerialPipeline struct { + empty bool `json:"-"` + Stages []DeliveryPipelineSerialPipelineStages `json:"stages"` +} + +type jsonDeliveryPipelineSerialPipeline DeliveryPipelineSerialPipeline + +func (r *DeliveryPipelineSerialPipeline) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipeline + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m 
map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipeline + } else { + + r.Stages = res.Stages + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipeline is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipeline *DeliveryPipelineSerialPipeline = &DeliveryPipelineSerialPipeline{empty: true} + +func (r *DeliveryPipelineSerialPipeline) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipeline) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipeline) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStages struct { + empty bool `json:"-"` + TargetId *string `json:"targetId"` + Profiles []string `json:"profiles"` + Strategy *DeliveryPipelineSerialPipelineStagesStrategy `json:"strategy"` + DeployParameters []DeliveryPipelineSerialPipelineStagesDeployParameters `json:"deployParameters"` +} + +type jsonDeliveryPipelineSerialPipelineStages DeliveryPipelineSerialPipelineStages + +func (r *DeliveryPipelineSerialPipelineStages) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStages + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStages + } else { + + r.TargetId = res.TargetId + + r.Profiles = res.Profiles + + r.Strategy = res.Strategy + + r.DeployParameters = res.DeployParameters + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStages 
is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStages *DeliveryPipelineSerialPipelineStages = &DeliveryPipelineSerialPipelineStages{empty: true} + +func (r *DeliveryPipelineSerialPipelineStages) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStages) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStages) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategy struct { + empty bool `json:"-"` + Standard *DeliveryPipelineSerialPipelineStagesStrategyStandard `json:"standard"` + Canary *DeliveryPipelineSerialPipelineStagesStrategyCanary `json:"canary"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategy DeliveryPipelineSerialPipelineStagesStrategy + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategy + } else { + + r.Standard = res.Standard + + r.Canary = res.Canary + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategy *DeliveryPipelineSerialPipelineStagesStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyStandard struct { + empty bool `json:"-"` + Verify *bool `json:"verify"` + Predeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy `json:"predeploy"` + Postdeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy `json:"postdeploy"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyStandard DeliveryPipelineSerialPipelineStagesStrategyStandard + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyStandard + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } else { + + r.Verify = res.Verify + + r.Predeploy = res.Predeploy + + r.Postdeploy = res.Postdeploy + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyStandard is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard *DeliveryPipelineSerialPipelineStagesStrategyStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanary struct { + empty bool `json:"-"` + RuntimeConfig *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig `json:"runtimeConfig"` + CanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment `json:"canaryDeployment"` + CustomCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment `json:"customCanaryDeployment"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanary DeliveryPipelineSerialPipelineStagesStrategyCanary + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanary + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } else { + + r.RuntimeConfig = res.RuntimeConfig + + r.CanaryDeployment = res.CanaryDeployment + + r.CustomCanaryDeployment = res.CustomCanaryDeployment + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanary is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary *DeliveryPipelineSerialPipelineStagesStrategyCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig struct { + empty bool `json:"-"` + Kubernetes *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes `json:"kubernetes"` + CloudRun *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun `json:"cloudRun"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } else { + + r.Kubernetes = res.Kubernetes + + r.CloudRun = res.CloudRun + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes struct { + empty bool `json:"-"` + GatewayServiceMesh *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh `json:"gatewayServiceMesh"` + ServiceNetworking *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking `json:"serviceNetworking"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } else { + + r.GatewayServiceMesh = res.GatewayServiceMesh + + r.ServiceNetworking = 
res.ServiceNetworking + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh struct { + empty bool `json:"-"` + HttpRoute *string `json:"httpRoute"` + Service *string `json:"service"` + Deployment *string `json:"deployment"` + RouteUpdateWaitTime *string `json:"routeUpdateWaitTime"` + StableCutbackDuration *string `json:"stableCutbackDuration"` + PodSelectorLabel *string `json:"podSelectorLabel"` + RouteDestinations *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations `json:"routeDestinations"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } else { + + r.HttpRoute = res.HttpRoute + + r.Service = res.Service + + r.Deployment = res.Deployment + + r.RouteUpdateWaitTime = res.RouteUpdateWaitTime + + r.StableCutbackDuration = res.StableCutbackDuration + + r.PodSelectorLabel = res.PodSelectorLabel + + r.RouteDestinations = res.RouteDestinations + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations struct { + empty bool `json:"-"` + DestinationIds []string `json:"destinationIds"` + PropagateService *bool `json:"propagateService"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + } else { + + r.DestinationIds 
= res.DestinationIds + + r.PropagateService = res.PropagateService + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking struct { + empty bool `json:"-"` + Service *string `json:"service"` + Deployment *string `json:"deployment"` + DisablePodOverprovisioning *bool `json:"disablePodOverprovisioning"` + PodSelectorLabel *string `json:"podSelectorLabel"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } else { + + r.Service = res.Service + + r.Deployment = res.Deployment + + r.DisablePodOverprovisioning = res.DisablePodOverprovisioning + + r.PodSelectorLabel = res.PodSelectorLabel + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun 
struct { + empty bool `json:"-"` + AutomaticTrafficControl *bool `json:"automaticTrafficControl"` + CanaryRevisionTags []string `json:"canaryRevisionTags"` + PriorRevisionTags []string `json:"priorRevisionTags"` + StableRevisionTags []string `json:"stableRevisionTags"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } else { + + r.AutomaticTrafficControl = res.AutomaticTrafficControl + + r.CanaryRevisionTags = res.CanaryRevisionTags + + r.PriorRevisionTags = res.PriorRevisionTags + + r.StableRevisionTags = res.StableRevisionTags + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment struct { + empty bool `json:"-"` + Percentages []int64 `json:"percentages"` + Verify *bool `json:"verify"` + Predeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy `json:"predeploy"` + Postdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy `json:"postdeploy"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } else { + + r.Percentages = res.Percentages + + r.Verify = res.Verify + + r.Predeploy = res.Predeploy + + r.Postdeploy = res.Postdeploy + + } + return nil +} + +// This object is used to assert a desired state 
where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment struct { + empty bool `json:"-"` + PhaseConfigs []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs `json:"phaseConfigs"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } else { + + r.PhaseConfigs = res.PhaseConfigs + + } + return nil +} + +// This object is used to assert a desired state where this 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs struct { + empty bool `json:"-"` + PhaseId *string `json:"phaseId"` + Percentage *int64 `json:"percentage"` + Profiles []string `json:"profiles"` + Verify *bool `json:"verify"` + Predeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy `json:"predeploy"` + Postdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy `json:"postdeploy"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + if err := 
json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } else { + + r.PhaseId = res.PhaseId + + r.Percentage = res.Percentage + + r.Profiles = res.Profiles + + r.Verify = res.Verify + + r.Predeploy = res.Predeploy + + r.Postdeploy = res.Postdeploy + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesDeployParameters struct { + empty bool `json:"-"` + Values map[string]string `json:"values"` + MatchTargetLabels map[string]string `json:"matchTargetLabels"` +} + +type jsonDeliveryPipelineSerialPipelineStagesDeployParameters DeliveryPipelineSerialPipelineStagesDeployParameters + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesDeployParameters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } else { + + r.Values = res.Values + + r.MatchTargetLabels = res.MatchTargetLabels + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesDeployParameters is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesDeployParameters *DeliveryPipelineSerialPipelineStagesDeployParameters = &DeliveryPipelineSerialPipelineStagesDeployParameters{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineCondition struct { + empty bool `json:"-"` + PipelineReadyCondition *DeliveryPipelineConditionPipelineReadyCondition `json:"pipelineReadyCondition"` + TargetsPresentCondition *DeliveryPipelineConditionTargetsPresentCondition `json:"targetsPresentCondition"` + TargetsTypeCondition *DeliveryPipelineConditionTargetsTypeCondition `json:"targetsTypeCondition"` +} + +type jsonDeliveryPipelineCondition DeliveryPipelineCondition + +func (r *DeliveryPipelineCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineCondition + } else { + + r.PipelineReadyCondition = res.PipelineReadyCondition + + r.TargetsPresentCondition = res.TargetsPresentCondition + + r.TargetsTypeCondition = res.TargetsTypeCondition + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineCondition *DeliveryPipelineCondition = &DeliveryPipelineCondition{empty: true} + +func (r *DeliveryPipelineCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineConditionPipelineReadyCondition struct { + empty bool `json:"-"` + Status *bool `json:"status"` + UpdateTime *string `json:"updateTime"` +} + +type jsonDeliveryPipelineConditionPipelineReadyCondition DeliveryPipelineConditionPipelineReadyCondition + +func (r *DeliveryPipelineConditionPipelineReadyCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineConditionPipelineReadyCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineConditionPipelineReadyCondition + } else { + + r.Status = res.Status + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineConditionPipelineReadyCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineConditionPipelineReadyCondition *DeliveryPipelineConditionPipelineReadyCondition = &DeliveryPipelineConditionPipelineReadyCondition{empty: true} + +func (r *DeliveryPipelineConditionPipelineReadyCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineConditionPipelineReadyCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineConditionPipelineReadyCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineConditionTargetsPresentCondition struct { + empty bool `json:"-"` + Status *bool `json:"status"` + MissingTargets []string `json:"missingTargets"` + UpdateTime *string `json:"updateTime"` +} + +type jsonDeliveryPipelineConditionTargetsPresentCondition DeliveryPipelineConditionTargetsPresentCondition + +func (r *DeliveryPipelineConditionTargetsPresentCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineConditionTargetsPresentCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineConditionTargetsPresentCondition + } else { + + r.Status = res.Status + + r.MissingTargets = res.MissingTargets + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineConditionTargetsPresentCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineConditionTargetsPresentCondition *DeliveryPipelineConditionTargetsPresentCondition = &DeliveryPipelineConditionTargetsPresentCondition{empty: true} + +func (r *DeliveryPipelineConditionTargetsPresentCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineConditionTargetsPresentCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineConditionTargetsPresentCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineConditionTargetsTypeCondition struct { + empty bool `json:"-"` + Status *bool `json:"status"` + ErrorDetails *string `json:"errorDetails"` +} + +type jsonDeliveryPipelineConditionTargetsTypeCondition DeliveryPipelineConditionTargetsTypeCondition + +func (r *DeliveryPipelineConditionTargetsTypeCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineConditionTargetsTypeCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineConditionTargetsTypeCondition + } else { + + r.Status = res.Status + + r.ErrorDetails = res.ErrorDetails + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineConditionTargetsTypeCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineConditionTargetsTypeCondition *DeliveryPipelineConditionTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{empty: true} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *DeliveryPipeline) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "clouddeploy", + Type: "DeliveryPipeline", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "clouddeploy", +{{- end }} + } +} + +func (r *DeliveryPipeline) ID() (string, error) { + if err := extractDeliveryPipelineFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "description": dcl.ValueOrEmptyString(nr.Description), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "serial_pipeline": dcl.ValueOrEmptyString(nr.SerialPipeline), + "condition": dcl.ValueOrEmptyString(nr.Condition), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "suspended": dcl.ValueOrEmptyString(nr.Suspended), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" 
}}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const DeliveryPipelineMaxPage = -1 + +type DeliveryPipelineList struct { + Items []*DeliveryPipeline + + nextToken string + + pageSize int32 + + resource *DeliveryPipeline +} + +func (l *DeliveryPipelineList) HasNext() bool { + return l.nextToken != "" +} + +func (l *DeliveryPipelineList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listDeliveryPipeline(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListDeliveryPipeline(ctx context.Context, project, location string) (*DeliveryPipelineList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListDeliveryPipelineWithMaxResults(ctx, project, location, DeliveryPipelineMaxPage) + +} + +func (c *Client) ListDeliveryPipelineWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*DeliveryPipelineList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &DeliveryPipeline{ + Project: &project, + Location: &location, + } + items, token, err := c.listDeliveryPipeline(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &DeliveryPipelineList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetDeliveryPipeline(ctx context.Context, r *DeliveryPipeline) (*DeliveryPipeline, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractDeliveryPipelineFields(r) + + b, err := c.getDeliveryPipelineRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalDeliveryPipeline(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeDeliveryPipelineNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractDeliveryPipelineFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteDeliveryPipeline(ctx context.Context, r *DeliveryPipeline) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("DeliveryPipeline resource is nil") + } + 
c.Config.Logger.InfoWithContext(ctx, "Deleting DeliveryPipeline...") + deleteOp := deleteDeliveryPipelineOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllDeliveryPipeline deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllDeliveryPipeline(ctx context.Context, project, location string, filter func(*DeliveryPipeline) bool) error { + listObj, err := c.ListDeliveryPipeline(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllDeliveryPipeline(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllDeliveryPipeline(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyDeliveryPipeline(ctx context.Context, rawDesired *DeliveryPipeline, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *DeliveryPipeline + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyDeliveryPipelineHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. 
+ if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyDeliveryPipelineHelper(c *Client, ctx context.Context, rawDesired *DeliveryPipeline, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyDeliveryPipeline...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractDeliveryPipelineFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.deliveryPipelineDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToDeliveryPipelineDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). 
+ + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []deliveryPipelineApiOperation + if create { + ops = append(ops, &createDeliveryPipelineOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyDeliveryPipelineDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyDeliveryPipelineDiff(c *Client, ctx context.Context, desired *DeliveryPipeline, rawDesired *DeliveryPipeline, ops []deliveryPipelineApiOperation, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetDeliveryPipeline(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createDeliveryPipelineOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapDeliveryPipeline(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeDeliveryPipelineNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeDeliveryPipelineNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeDeliveryPipelineDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractDeliveryPipelineFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractDeliveryPipelineFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffDeliveryPipeline(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *DeliveryPipeline) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go new file mode 100644 index 000000000000..d788ebb3c099 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go @@ -0,0 +1,9009 @@ +package clouddeploy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *DeliveryPipeline) validate() error { + + if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := 
dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.SerialPipeline) { + if err := r.SerialPipeline.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Condition) { + if err := r.Condition.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipeline) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStages) validate() error { + if !dcl.IsEmptyValueIndirect(r.Strategy) { + if err := r.Strategy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategy) validate() error { + if !dcl.IsEmptyValueIndirect(r.Standard) { + if err := r.Standard.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Canary) { + if err := r.Canary.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) validate() error { + if !dcl.IsEmptyValueIndirect(r.Predeploy) { + if err := r.Predeploy.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Postdeploy) { + if err := r.Postdeploy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"CanaryDeployment", "CustomCanaryDeployment"}, r.CanaryDeployment, r.CustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RuntimeConfig) { + if err := r.RuntimeConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CanaryDeployment) { + if err := r.CanaryDeployment.validate(); err != nil { + return err + } + } + if 
!dcl.IsEmptyValueIndirect(r.CustomCanaryDeployment) { + if err := r.CustomCanaryDeployment.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Kubernetes", "CloudRun"}, r.Kubernetes, r.CloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Kubernetes) { + if err := r.Kubernetes.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CloudRun) { + if err := r.CloudRun.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GatewayServiceMesh", "ServiceNetworking"}, r.GatewayServiceMesh, r.ServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.GatewayServiceMesh) { + if err := r.GatewayServiceMesh.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ServiceNetworking) { + if err := r.ServiceNetworking.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) validate() error { + if err := dcl.Required(r, "httpRoute"); err != nil { + return err + } + if err := dcl.Required(r, "service"); err != nil { + return err + } + if err := dcl.Required(r, "deployment"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RouteDestinations) { + if err := r.RouteDestinations.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) validate() error { + if err := dcl.Required(r, "destinationIds"); err != nil { + return err + } + return nil +} +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) validate() error { + if err := dcl.Required(r, "service"); err != nil { + return err + } + if err := dcl.Required(r, "deployment"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) validate() error { + if err := dcl.Required(r, "percentages"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Predeploy) { + if err := r.Predeploy.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Postdeploy) { + if err := r.Postdeploy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) validate() error { + if err := dcl.Required(r, "phaseConfigs"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) validate() error { + if err := dcl.Required(r, "phaseId"); err != nil { + return err + } + if err := dcl.Required(r, "percentage"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Predeploy) { + if err := r.Predeploy.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Postdeploy) { + if err := r.Postdeploy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) validate() error { + return nil +} +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) validate() error { + if err := dcl.Required(r, "values"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineCondition) validate() error { + if !dcl.IsEmptyValueIndirect(r.PipelineReadyCondition) { + if err := r.PipelineReadyCondition.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.TargetsPresentCondition) { + if err := r.TargetsPresentCondition.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.TargetsTypeCondition) { + if err := r.TargetsTypeCondition.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineConditionPipelineReadyCondition) validate() error { + return nil +} +func (r *DeliveryPipelineConditionTargetsPresentCondition) validate() error { + return nil +} +func (r *DeliveryPipelineConditionTargetsTypeCondition) validate() error { + return nil +} +func (r *DeliveryPipeline) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://clouddeploy.googleapis.com/v1/", params) +} + +func (r *DeliveryPipeline) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *DeliveryPipeline) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines", nr.basePath(), 
userBasePath, params), nil + +} + +func (r *DeliveryPipeline) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines?deliveryPipelineId={{name}}", nr.basePath(), userBasePath, params), nil + +} + +func (r *DeliveryPipeline) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *DeliveryPipeline) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *DeliveryPipeline) SetPolicyVerb() string { + return "" +} + +func (r *DeliveryPipeline) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *DeliveryPipeline) IAMPolicyVersion() int { + return 3 +} + +// deliveryPipelineApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type deliveryPipelineApiOperation interface { + do(context.Context, *DeliveryPipeline, *Client) error +} + +// newUpdateDeliveryPipelineUpdateDeliveryPipelineRequest creates a request for an +// DeliveryPipeline resource's UpdateDeliveryPipeline update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(ctx context.Context, f *DeliveryPipeline, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v, err := expandDeliveryPipelineSerialPipeline(c, f.SerialPipeline, res); err != nil { + return nil, fmt.Errorf("error expanding SerialPipeline into serialPipeline: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["serialPipeline"] = v + } + if v, err := expandDeliveryPipelineCondition(c, f.Condition, res); err != nil { + return nil, fmt.Errorf("error expanding Condition into condition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["condition"] = v + } + if v := f.Suspended; !dcl.IsEmptyValueIndirect(v) { + req["suspended"] = v + } + b, err := c.getDeliveryPipelineRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/deliveryPipelines/%s", *f.Project, *f.Location, *f.Name) + + return req, nil +} + +// marshalUpdateDeliveryPipelineUpdateDeliveryPipelineRequest converts the update into +// the final JSON request body. +func marshalUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateDeliveryPipelineUpdateDeliveryPipelineOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. 
+ // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateDeliveryPipelineUpdateDeliveryPipelineOperation) do(ctx context.Context, r *DeliveryPipeline, c *Client) error { + _, err := c.GetDeliveryPipeline(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateDeliveryPipeline") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listDeliveryPipelineRaw(ctx context.Context, r *DeliveryPipeline, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != DeliveryPipelineMaxPage { + m["pageSize"] = fmt.Sprintf("%v", 
pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listDeliveryPipelineOperation struct { + DeliveryPipelines []map[string]interface{} `json:"deliveryPipelines"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listDeliveryPipeline(ctx context.Context, r *DeliveryPipeline, pageToken string, pageSize int32) ([]*DeliveryPipeline, string, error) { + b, err := c.listDeliveryPipelineRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listDeliveryPipelineOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*DeliveryPipeline + for _, v := range m.DeliveryPipelines { + res, err := unmarshalMapDeliveryPipeline(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllDeliveryPipeline(ctx context.Context, f func(*DeliveryPipeline) bool, resources []*DeliveryPipeline) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteDeliveryPipeline(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteDeliveryPipelineOperation struct{} + +func (op *deleteDeliveryPipelineOperation) do(ctx context.Context, r *DeliveryPipeline, c *Client) error { + r, err := c.GetDeliveryPipeline(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "DeliveryPipeline not found, returning. 
Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetDeliveryPipeline checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + u, err = dcl.AddQueryParams(u, map[string]string{"force": "true"}) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetDeliveryPipeline(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. 
+type createDeliveryPipelineOperation struct { + response map[string]interface{} +} + +func (op *createDeliveryPipelineOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createDeliveryPipelineOperation) do(ctx context.Context, r *DeliveryPipeline, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetDeliveryPipeline(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getDeliveryPipelineRaw(ctx context.Context, r *DeliveryPipeline) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) deliveryPipelineDiffsForRawDesired(ctx context.Context, rawDesired *DeliveryPipeline, opts ...dcl.ApplyOption) (initial, desired *DeliveryPipeline, diffs 
[]*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *DeliveryPipeline + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*DeliveryPipeline); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected DeliveryPipeline, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetDeliveryPipeline(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a DeliveryPipeline resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve DeliveryPipeline resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that DeliveryPipeline resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeDeliveryPipelineDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for DeliveryPipeline: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for DeliveryPipeline: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractDeliveryPipelineFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeDeliveryPipelineInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for DeliveryPipeline: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. 
+ desired, err = canonicalizeDeliveryPipelineDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for DeliveryPipeline: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffDeliveryPipeline(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeDeliveryPipelineInitialState(rawInitial, rawDesired *DeliveryPipeline) (*DeliveryPipeline, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeDeliveryPipelineDesiredState(rawDesired, rawInitial *DeliveryPipeline, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.SerialPipeline = canonicalizeDeliveryPipelineSerialPipeline(rawDesired.SerialPipeline, nil, opts...) + rawDesired.Condition = canonicalizeDeliveryPipelineCondition(rawDesired.Condition, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &DeliveryPipeline{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + canonicalDesired.SerialPipeline = canonicalizeDeliveryPipelineSerialPipeline(rawDesired.SerialPipeline, rawInitial.SerialPipeline, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.BoolCanonicalize(rawDesired.Suspended, rawInitial.Suspended) { + canonicalDesired.Suspended = rawInitial.Suspended + } else { + canonicalDesired.Suspended = rawDesired.Suspended + } + return canonicalDesired, nil +} + +func canonicalizeDeliveryPipelineNewState(c *Client, rawNew, rawDesired *DeliveryPipeline) (*DeliveryPipeline, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.SerialPipeline) && 
dcl.IsEmptyValueIndirect(rawDesired.SerialPipeline) { + rawNew.SerialPipeline = rawDesired.SerialPipeline + } else { + rawNew.SerialPipeline = canonicalizeNewDeliveryPipelineSerialPipeline(c, rawDesired.SerialPipeline, rawNew.SerialPipeline) + } + + if dcl.IsEmptyValueIndirect(rawNew.Condition) && dcl.IsEmptyValueIndirect(rawDesired.Condition) { + rawNew.Condition = rawDesired.Condition + } else { + rawNew.Condition = canonicalizeNewDeliveryPipelineCondition(c, rawDesired.Condition, rawNew.Condition) + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Suspended) && dcl.IsEmptyValueIndirect(rawDesired.Suspended) { + rawNew.Suspended = rawDesired.Suspended + } else { + if dcl.BoolCanonicalize(rawDesired.Suspended, rawNew.Suspended) { + rawNew.Suspended = rawDesired.Suspended + } + } + + return rawNew, nil +} + +func canonicalizeDeliveryPipelineSerialPipeline(des, initial *DeliveryPipelineSerialPipeline, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipeline { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipeline{} + + cDes.Stages = canonicalizeDeliveryPipelineSerialPipelineStagesSlice(des.Stages, initial.Stages, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineSlice(des, initial []DeliveryPipelineSerialPipeline, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipeline { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipeline, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipeline(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipeline, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipeline(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipeline(c *Client, des, nw *DeliveryPipelineSerialPipeline) *DeliveryPipelineSerialPipeline { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipeline while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Stages = canonicalizeNewDeliveryPipelineSerialPipelineStagesSlice(c, des.Stages, nw.Stages) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineSet(c *Client, des, nw []DeliveryPipelineSerialPipeline) []DeliveryPipelineSerialPipeline { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipeline + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipeline(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineSlice(c *Client, des, nw []DeliveryPipelineSerialPipeline) []DeliveryPipelineSerialPipeline { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
+ // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipeline + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipeline(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStages(des, initial *DeliveryPipelineSerialPipelineStages, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStages { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStages{} + + if dcl.StringCanonicalize(des.TargetId, initial.TargetId) || dcl.IsZeroValue(des.TargetId) { + cDes.TargetId = initial.TargetId + } else { + cDes.TargetId = des.TargetId + } + if dcl.StringArrayCanonicalize(des.Profiles, initial.Profiles) { + cDes.Profiles = initial.Profiles + } else { + cDes.Profiles = des.Profiles + } + cDes.Strategy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(des.Strategy, initial.Strategy, opts...) + cDes.DeployParameters = canonicalizeDeliveryPipelineSerialPipelineStagesDeployParametersSlice(des.DeployParameters, initial.DeployParameters, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesSlice(des, initial []DeliveryPipelineSerialPipelineStages, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStages { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStages(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStages(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStages(c *Client, des, nw *DeliveryPipelineSerialPipelineStages) *DeliveryPipelineSerialPipelineStages { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStages while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.TargetId, nw.TargetId) { + nw.TargetId = des.TargetId + } + if dcl.StringArrayCanonicalize(des.Profiles, nw.Profiles) { + nw.Profiles = des.Profiles + } + nw.Strategy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, des.Strategy, nw.Strategy) + nw.DeployParameters = canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, des.DeployParameters, nw.DeployParameters) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStages) []DeliveryPipelineSerialPipelineStages { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStages + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStages(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStages) []DeliveryPipelineSerialPipelineStages { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStages + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStages(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(des, initial *DeliveryPipelineSerialPipelineStagesStrategy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategy{} + + cDes.Standard = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(des.Standard, initial.Standard, opts...) + cDes.Canary = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(des.Canary, initial.Canary, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategy) *DeliveryPipelineSerialPipelineStagesStrategy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Standard = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, des.Standard, nw.Standard) + nw.Canary = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, des.Canary, nw.Canary) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategy) []DeliveryPipelineSerialPipelineStagesStrategy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategy) []DeliveryPipelineSerialPipelineStagesStrategy { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(des, initial *DeliveryPipelineSerialPipelineStagesStrategyStandard, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + cDes.Predeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(des.Predeploy, initial.Predeploy, opts...) + cDes.Postdeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(des.Postdeploy, initial.Postdeploy, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyStandard, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyStandard) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyStandard while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + nw.Predeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, des.Predeploy, nw.Predeploy) + nw.Postdeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, des.Postdeploy, nw.Postdeploy) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandard) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyStandard + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandard) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyStandard + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(des, initial 
[]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanary, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.CanaryDeployment != nil || (initial != nil && initial.CanaryDeployment != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CustomCanaryDeployment) { + des.CanaryDeployment = nil + if initial != nil { + initial.CanaryDeployment = nil + } + } + } + + if des.CustomCanaryDeployment != nil || (initial != nil && initial.CustomCanaryDeployment != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CanaryDeployment) { + des.CustomCanaryDeployment = nil + if initial != nil { + initial.CustomCanaryDeployment = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + + cDes.RuntimeConfig = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(des.RuntimeConfig, initial.RuntimeConfig, opts...) + cDes.CanaryDeployment = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(des.CanaryDeployment, initial.CanaryDeployment, opts...) 
+ cDes.CustomCanaryDeployment = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(des.CustomCanaryDeployment, initial.CustomCanaryDeployment, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanary, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanary) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanary while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.RuntimeConfig = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, des.RuntimeConfig, nw.RuntimeConfig) + nw.CanaryDeployment = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, des.CanaryDeployment, nw.CanaryDeployment) + nw.CustomCanaryDeployment = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, des.CustomCanaryDeployment, nw.CustomCanaryDeployment) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanarySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanary) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanary + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanary) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanary + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.Kubernetes != nil || (initial != nil && initial.Kubernetes != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CloudRun) { + des.Kubernetes = nil + if initial != nil { + initial.Kubernetes = nil + } + } + } + + if des.CloudRun != nil || (initial != nil && initial.CloudRun != nil) { + // Check if anything else is set. + if dcl.AnySet(des.Kubernetes) { + des.CloudRun = nil + if initial != nil { + initial.CloudRun = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + + cDes.Kubernetes = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(des.Kubernetes, initial.Kubernetes, opts...) + cDes.CloudRun = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(des.CloudRun, initial.CloudRun, opts...) 
+ + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.Kubernetes = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, des.Kubernetes, nw.Kubernetes) + nw.CloudRun = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, des.CloudRun, nw.CloudRun) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.GatewayServiceMesh != nil || (initial != nil && initial.GatewayServiceMesh != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ServiceNetworking) { + des.GatewayServiceMesh = nil + if initial != nil { + initial.GatewayServiceMesh = nil + } + } + } + + if des.ServiceNetworking != nil || (initial != nil && initial.ServiceNetworking != nil) { + // Check if anything else is set. + if dcl.AnySet(des.GatewayServiceMesh) { + des.ServiceNetworking = nil + if initial != nil { + initial.ServiceNetworking = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + + cDes.GatewayServiceMesh = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(des.GatewayServiceMesh, initial.GatewayServiceMesh, opts...) + cDes.ServiceNetworking = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(des.ServiceNetworking, initial.ServiceNetworking, opts...) 
+ + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.GatewayServiceMesh = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, des.GatewayServiceMesh, nw.GatewayServiceMesh) + nw.ServiceNetworking = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, des.ServiceNetworking, nw.ServiceNetworking) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + + if dcl.StringCanonicalize(des.HttpRoute, initial.HttpRoute) || dcl.IsZeroValue(des.HttpRoute) { + cDes.HttpRoute = initial.HttpRoute + } else { + cDes.HttpRoute = des.HttpRoute + } + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, initial.Deployment) || dcl.IsZeroValue(des.Deployment) { + cDes.Deployment = initial.Deployment + } else { + cDes.Deployment = des.Deployment + } + if dcl.StringCanonicalize(des.RouteUpdateWaitTime, initial.RouteUpdateWaitTime) || dcl.IsZeroValue(des.RouteUpdateWaitTime) { + cDes.RouteUpdateWaitTime = initial.RouteUpdateWaitTime + } else { + cDes.RouteUpdateWaitTime = des.RouteUpdateWaitTime + } + if dcl.StringCanonicalize(des.StableCutbackDuration, initial.StableCutbackDuration) || dcl.IsZeroValue(des.StableCutbackDuration) { + cDes.StableCutbackDuration = initial.StableCutbackDuration + } else { + cDes.StableCutbackDuration = des.StableCutbackDuration + } + if dcl.StringCanonicalize(des.PodSelectorLabel, 
initial.PodSelectorLabel) || dcl.IsZeroValue(des.PodSelectorLabel) { + cDes.PodSelectorLabel = initial.PodSelectorLabel + } else { + cDes.PodSelectorLabel = des.PodSelectorLabel + } + cDes.RouteDestinations = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(des.RouteDestinations, initial.RouteDestinations, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.HttpRoute, nw.HttpRoute) { + nw.HttpRoute = des.HttpRoute + } + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, nw.Deployment) { + nw.Deployment = des.Deployment + } + if dcl.StringCanonicalize(des.RouteUpdateWaitTime, nw.RouteUpdateWaitTime) { + nw.RouteUpdateWaitTime = des.RouteUpdateWaitTime + } + if dcl.StringCanonicalize(des.StableCutbackDuration, nw.StableCutbackDuration) { + nw.StableCutbackDuration = des.StableCutbackDuration + } + if dcl.StringCanonicalize(des.PodSelectorLabel, nw.PodSelectorLabel) { + nw.PodSelectorLabel = des.PodSelectorLabel + } + nw.RouteDestinations = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, des.RouteDestinations, nw.RouteDestinations) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return nw + 
} + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + + if dcl.StringArrayCanonicalize(des.DestinationIds, initial.DestinationIds) { + cDes.DestinationIds = initial.DestinationIds + } else { + cDes.DestinationIds = des.DestinationIds + } + if dcl.BoolCanonicalize(des.PropagateService, initial.PropagateService) || dcl.IsZeroValue(des.PropagateService) { + cDes.PropagateService = initial.PropagateService + } else { + cDes.PropagateService = des.PropagateService + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := 
make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.DestinationIds, nw.DestinationIds) { + nw.DestinationIds = des.DestinationIds + } + if dcl.BoolCanonicalize(des.PropagateService, nw.PropagateService) { + nw.PropagateService = des.PropagateService + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, initial.Deployment) || dcl.IsZeroValue(des.Deployment) { + cDes.Deployment = initial.Deployment + } else { + cDes.Deployment = des.Deployment + } + if dcl.BoolCanonicalize(des.DisablePodOverprovisioning, initial.DisablePodOverprovisioning) || 
dcl.IsZeroValue(des.DisablePodOverprovisioning) { + cDes.DisablePodOverprovisioning = initial.DisablePodOverprovisioning + } else { + cDes.DisablePodOverprovisioning = des.DisablePodOverprovisioning + } + if dcl.StringCanonicalize(des.PodSelectorLabel, initial.PodSelectorLabel) || dcl.IsZeroValue(des.PodSelectorLabel) { + cDes.PodSelectorLabel = initial.PodSelectorLabel + } else { + cDes.PodSelectorLabel = des.PodSelectorLabel + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, nw.Deployment) { + nw.Deployment = des.Deployment + } + if dcl.BoolCanonicalize(des.DisablePodOverprovisioning, nw.DisablePodOverprovisioning) { + nw.DisablePodOverprovisioning = des.DisablePodOverprovisioning + } + if dcl.StringCanonicalize(des.PodSelectorLabel, nw.PodSelectorLabel) { + nw.PodSelectorLabel = des.PodSelectorLabel + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + + if dcl.BoolCanonicalize(des.AutomaticTrafficControl, initial.AutomaticTrafficControl) || dcl.IsZeroValue(des.AutomaticTrafficControl) { + cDes.AutomaticTrafficControl = initial.AutomaticTrafficControl + } else { + cDes.AutomaticTrafficControl = des.AutomaticTrafficControl + } + if dcl.StringArrayCanonicalize(des.CanaryRevisionTags, initial.CanaryRevisionTags) { + cDes.CanaryRevisionTags = initial.CanaryRevisionTags + } else { + cDes.CanaryRevisionTags = des.CanaryRevisionTags + } + if dcl.StringArrayCanonicalize(des.PriorRevisionTags, initial.PriorRevisionTags) { + cDes.PriorRevisionTags = initial.PriorRevisionTags + } else { + cDes.PriorRevisionTags = des.PriorRevisionTags + } + if dcl.StringArrayCanonicalize(des.StableRevisionTags, initial.StableRevisionTags) { + cDes.StableRevisionTags = initial.StableRevisionTags + } else { + cDes.StableRevisionTags = des.StableRevisionTags + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, opts ...dcl.ApplyOption) 
[]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AutomaticTrafficControl, nw.AutomaticTrafficControl) { + nw.AutomaticTrafficControl = des.AutomaticTrafficControl + } + if dcl.StringArrayCanonicalize(des.CanaryRevisionTags, nw.CanaryRevisionTags) { + nw.CanaryRevisionTags = des.CanaryRevisionTags + } + if dcl.StringArrayCanonicalize(des.PriorRevisionTags, nw.PriorRevisionTags) { + nw.PriorRevisionTags = des.PriorRevisionTags + } + if dcl.StringArrayCanonicalize(des.StableRevisionTags, nw.StableRevisionTags) { + nw.StableRevisionTags = des.StableRevisionTags + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + + if dcl.IsZeroValue(des.Percentages) || (dcl.IsEmptyValueIndirect(des.Percentages) && dcl.IsEmptyValueIndirect(initial.Percentages)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Percentages = initial.Percentages + } else { + cDes.Percentages = des.Percentages + } + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + cDes.Predeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(des.Predeploy, initial.Predeploy, opts...) 
+ cDes.Postdeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(des.Postdeploy, initial.Postdeploy, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + nw.Predeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, des.Predeploy, nw.Predeploy) + nw.Postdeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, des.Postdeploy, nw.Postdeploy) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, 0, len(des)) + for _, d := range des { + cd := 
canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + + cDes.PhaseConfigs = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(des.PhaseConfigs, initial.PhaseConfigs, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.PhaseConfigs = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, des.PhaseConfigs, nw.PhaseConfigs) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + + if dcl.StringCanonicalize(des.PhaseId, initial.PhaseId) || dcl.IsZeroValue(des.PhaseId) { + cDes.PhaseId = initial.PhaseId + } else { + cDes.PhaseId = des.PhaseId + } + if dcl.IsZeroValue(des.Percentage) || (dcl.IsEmptyValueIndirect(des.Percentage) && dcl.IsEmptyValueIndirect(initial.Percentage)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Percentage = initial.Percentage + } else { + cDes.Percentage = des.Percentage + } + if dcl.StringArrayCanonicalize(des.Profiles, initial.Profiles) { + cDes.Profiles = initial.Profiles + } else { + cDes.Profiles = des.Profiles + } + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + cDes.Predeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(des.Predeploy, initial.Predeploy, opts...) + cDes.Postdeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(des.Postdeploy, initial.Postdeploy, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.PhaseId, nw.PhaseId) { + nw.PhaseId = des.PhaseId + } + if dcl.StringArrayCanonicalize(des.Profiles, nw.Profiles) { + nw.Profiles = des.Profiles + } + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + nw.Predeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, des.Predeploy, nw.Predeploy) + nw.Postdeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, des.Postdeploy, nw.Postdeploy) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(des, initial *DeliveryPipelineSerialPipelineStagesDeployParameters, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesDeployParameters{} + + if dcl.IsZeroValue(des.Values) || (dcl.IsEmptyValueIndirect(des.Values) && dcl.IsEmptyValueIndirect(initial.Values)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Values = initial.Values + } else { + cDes.Values = des.Values + } + if dcl.IsZeroValue(des.MatchTargetLabels) || (dcl.IsEmptyValueIndirect(des.MatchTargetLabels) && dcl.IsEmptyValueIndirect(initial.MatchTargetLabels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MatchTargetLabels = initial.MatchTargetLabels + } else { + cDes.MatchTargetLabels = des.MatchTargetLabels + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesDeployParametersSlice(des, initial []DeliveryPipelineSerialPipelineStagesDeployParameters, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesDeployParameters) *DeliveryPipelineSerialPipelineStagesDeployParameters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesDeployParameters while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesDeployParameters) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesDeployParameters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesDeployParameters) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesDeployParameters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineCondition(des, initial *DeliveryPipelineCondition, opts ...dcl.ApplyOption) *DeliveryPipelineCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineCondition{} + + cDes.PipelineReadyCondition = canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des.PipelineReadyCondition, initial.PipelineReadyCondition, opts...) + cDes.TargetsPresentCondition = canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des.TargetsPresentCondition, initial.TargetsPresentCondition, opts...) + cDes.TargetsTypeCondition = canonicalizeDeliveryPipelineConditionTargetsTypeCondition(des.TargetsTypeCondition, initial.TargetsTypeCondition, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineConditionSlice(des, initial []DeliveryPipelineCondition, opts ...dcl.ApplyOption) []DeliveryPipelineCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineCondition(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineCondition(c *Client, des, nw *DeliveryPipelineCondition) *DeliveryPipelineCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.PipelineReadyCondition = canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, des.PipelineReadyCondition, nw.PipelineReadyCondition) + nw.TargetsPresentCondition = canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, des.TargetsPresentCondition, nw.TargetsPresentCondition) + nw.TargetsTypeCondition = canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, des.TargetsTypeCondition, nw.TargetsTypeCondition) + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionSet(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineConditionSlice(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des, initial *DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionPipelineReadyCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionPipelineReadyConditionSlice(des, initial []DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionPipelineReadyCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c *Client, des, nw *DeliveryPipelineConditionPipelineReadyCondition) *DeliveryPipelineConditionPipelineReadyCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionPipelineReadyCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSet(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineConditionPipelineReadyCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionPipelineReadyCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des, initial *DeliveryPipelineConditionTargetsPresentCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionTargetsPresentCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.StringArrayCanonicalize(des.MissingTargets, initial.MissingTargets) { + cDes.MissingTargets = initial.MissingTargets + } else { + cDes.MissingTargets = des.MissingTargets + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionTargetsPresentConditionSlice(des, initial []DeliveryPipelineConditionTargetsPresentCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionTargetsPresentCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c *Client, des, nw *DeliveryPipelineConditionTargetsPresentCondition) *DeliveryPipelineConditionTargetsPresentCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionTargetsPresentCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + if dcl.StringArrayCanonicalize(des.MissingTargets, nw.MissingTargets) { + nw.MissingTargets = des.MissingTargets + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSet(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. 
Remove matched elements from nw. + var items []DeliveryPipelineConditionTargetsPresentCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionTargetsPresentCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionTargetsTypeCondition(des, initial *DeliveryPipelineConditionTargetsTypeCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionTargetsTypeCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.StringCanonicalize(des.ErrorDetails, initial.ErrorDetails) || dcl.IsZeroValue(des.ErrorDetails) { + cDes.ErrorDetails = initial.ErrorDetails + } else { + cDes.ErrorDetails = 
des.ErrorDetails + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionTargetsTypeConditionSlice(des, initial []DeliveryPipelineConditionTargetsTypeCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionTargetsTypeCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsTypeCondition(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsTypeCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c *Client, des, nw *DeliveryPipelineConditionTargetsTypeCondition) *DeliveryPipelineConditionTargetsTypeCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionTargetsTypeCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + if dcl.StringCanonicalize(des.ErrorDetails, nw.ErrorDetails) { + nw.ErrorDetails = des.ErrorDetails + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeConditionSet(c *Client, des, nw []DeliveryPipelineConditionTargetsTypeCondition) []DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineConditionTargetsTypeCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionTargetsTypeConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, des, nw []DeliveryPipelineConditionTargetsTypeCondition) []DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionTargetsTypeCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffDeliveryPipeline(c *Client, desired, actual *DeliveryPipeline, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SerialPipeline, actual.SerialPipeline, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipeline, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("SerialPipeline")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Condition, actual.Condition, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareDeliveryPipelineConditionNewStyle, EmptyObject: EmptyDeliveryPipelineCondition, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Condition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Suspended, actual.Suspended, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Suspended")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareDeliveryPipelineSerialPipelineNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipeline) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipeline) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline or *DeliveryPipelineSerialPipeline", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipeline) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipeline) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Stages, actual.Stages, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStages, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Stages")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStages) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStages) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages or *DeliveryPipelineSerialPipelineStages", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStages) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStages) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetId, actual.TargetId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Profiles, actual.Profiles, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Profiles")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Strategy, actual.Strategy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Strategy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DeployParameters, actual.DeployParameters, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesDeployParameters, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DeployParameters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategy or *DeliveryPipelineSerialPipelineStagesStrategy", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategy", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Standard, actual.Standard, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Standard")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Canary, actual.Canary, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Canary")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandard or *DeliveryPipelineSerialPipelineStagesStrategyStandard", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandard", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Predeploy, actual.Predeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Predeploy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Postdeploy, actual.Postdeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Postdeploy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy or *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Actions, actual.Actions, 
dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy or *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanary or *DeliveryPipelineSerialPipelineStagesStrategyCanary", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanary", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.RuntimeConfig, actual.RuntimeConfig, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("RuntimeConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CanaryDeployment, actual.CanaryDeployment, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CanaryDeployment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CustomCanaryDeployment, actual.CustomCanaryDeployment, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CustomCanaryDeployment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Kubernetes, actual.Kubernetes, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Kubernetes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + 
diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CloudRun, actual.CloudRun, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CloudRun")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GatewayServiceMesh, actual.GatewayServiceMesh, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, OperationSelector: 
dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("GatewayServiceMesh")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ServiceNetworking, actual.ServiceNetworking, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("ServiceNetworking")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle
// compares a desired and an actual GatewayServiceMesh value field by field,
// collecting the per-field diffs; any dcl.Diff error aborts the comparison.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.HttpRoute, actual.HttpRoute, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("HttpRoute")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Deployment, actual.Deployment, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Deployment")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.RouteUpdateWaitTime, actual.RouteUpdateWaitTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("RouteUpdateWaitTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.StableCutbackDuration, actual.StableCutbackDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("StableCutbackDuration")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PodSelectorLabel, actual.PodSelectorLabel, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PodSelectorLabel")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.RouteDestinations, actual.RouteDestinations, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("RouteDestinations")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle
// compares a desired and an actual RouteDestinations value field by field.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DestinationIds, actual.DestinationIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DestinationIds")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PropagateService, actual.PropagateService, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PropagateService")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle
// compares a desired and an actual ServiceNetworking value field by field.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Deployment, actual.Deployment, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Deployment")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DisablePodOverprovisioning, actual.DisablePodOverprovisioning, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DisablePodOverprovisioning")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PodSelectorLabel, actual.PodSelectorLabel, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PodSelectorLabel")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle
// compares a desired and an actual CloudRun runtime config value field by field.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.AutomaticTrafficControl, actual.AutomaticTrafficControl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("AutomaticTrafficControl")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CanaryRevisionTags, actual.CanaryRevisionTags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CanaryRevisionTags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PriorRevisionTags, actual.PriorRevisionTags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PriorRevisionTags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.StableRevisionTags, actual.StableRevisionTags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("StableRevisionTags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle
// compares a desired and an actual CanaryDeployment value field by field.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Percentages, actual.Percentages, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Percentages")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Predeploy, actual.Predeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Predeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Postdeploy, actual.Postdeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Postdeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle
// compares a desired and an actual CanaryDeployment Predeploy value.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle
// compares a desired and an actual CanaryDeployment Postdeploy value.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle
// compares a desired and an actual CustomCanaryDeployment value.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.PhaseConfigs, actual.PhaseConfigs, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PhaseConfigs")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle
// compares a desired and an actual PhaseConfigs value field by field.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.PhaseId, actual.PhaseId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PhaseId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Percentage, actual.Percentage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Percentage")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Profiles, actual.Profiles, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Profiles")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Predeploy, actual.Predeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Predeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Postdeploy, actual.Postdeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Postdeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle
// compares a desired and an actual PhaseConfigs Predeploy value.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle
// compares a desired and an actual PhaseConfigs Postdeploy value.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle compares
// a desired and an actual DeployParameters value field by field.
func compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesDeployParameters)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesDeployParameters)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesDeployParameters or *DeliveryPipelineSerialPipelineStagesDeployParameters", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesDeployParameters)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesDeployParameters)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesDeployParameters", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Values")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MatchTargetLabels, actual.MatchTargetLabels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("MatchTargetLabels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineConditionNewStyle compares a desired and an actual
// DeliveryPipelineCondition, delegating each sub-condition to its own comparator.
func compareDeliveryPipelineConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineCondition)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition or *DeliveryPipelineCondition", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineCondition)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.PipelineReadyCondition, actual.PipelineReadyCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionPipelineReadyConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionPipelineReadyCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PipelineReadyCondition")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.TargetsPresentCondition, actual.TargetsPresentCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionTargetsPresentConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionTargetsPresentCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetsPresentCondition")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.TargetsTypeCondition, actual.TargetsTypeCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionTargetsTypeConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionTargetsTypeCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetsTypeCondition")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineConditionPipelineReadyConditionNewStyle compares a
// desired and an actual PipelineReadyCondition value field by field.
func compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineConditionPipelineReadyCondition)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineConditionPipelineReadyCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition or *DeliveryPipelineConditionPipelineReadyCondition", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineConditionPipelineReadyCondition)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineConditionPipelineReadyCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineConditionTargetsPresentConditionNewStyle compares a
// desired and an actual TargetsPresentCondition value field by field.
func compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineConditionTargetsPresentCondition)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineConditionTargetsPresentCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition or *DeliveryPipelineConditionTargetsPresentCondition", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineConditionTargetsPresentCondition)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineConditionTargetsPresentCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	// MissingTargets holds resource references, so it is diffed as a ReferenceType.
	if ds, err := dcl.Diff(desired.MissingTargets, actual.MissingTargets, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("MissingTargets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineConditionTargetsTypeConditionNewStyle compares a
// desired and an actual TargetsTypeCondition value field by field.
func compareDeliveryPipelineConditionTargetsTypeConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineConditionTargetsTypeCondition)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineConditionTargetsTypeCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsTypeCondition or *DeliveryPipelineConditionTargetsTypeCondition", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineConditionTargetsTypeCondition)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineConditionTargetsTypeCondition)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsTypeCondition", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ErrorDetails, actual.ErrorDetails, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("ErrorDetails")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
+func (r *DeliveryPipeline) urlNormalized() *DeliveryPipeline { + normalized := dcl.Copy(*r).(DeliveryPipeline) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *DeliveryPipeline) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateDeliveryPipeline" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the DeliveryPipeline resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *DeliveryPipeline) marshal(c *Client) ([]byte, error) { + m, err := expandDeliveryPipeline(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling DeliveryPipeline: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalDeliveryPipeline decodes JSON responses into the DeliveryPipeline resource schema. 
+func unmarshalDeliveryPipeline(b []byte, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapDeliveryPipeline(m, c, res) +} + +func unmarshalMapDeliveryPipeline(m map[string]interface{}, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { + + flattened := flattenDeliveryPipeline(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandDeliveryPipeline expands DeliveryPipeline into a JSON request object. +func expandDeliveryPipeline(c *Client, f *DeliveryPipeline) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v, err := expandDeliveryPipelineSerialPipeline(c, f.SerialPipeline, res); err != nil { + return nil, fmt.Errorf("error expanding SerialPipeline into serialPipeline: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serialPipeline"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v := f.Suspended; dcl.ValueShouldBeSent(v) { + m["suspended"] = v + } + + return m, nil +} + +// flattenDeliveryPipeline flattens DeliveryPipeline from a JSON request 
object into the +// DeliveryPipeline type. +func flattenDeliveryPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipeline { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &DeliveryPipeline{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.SerialPipeline = flattenDeliveryPipelineSerialPipeline(c, m["serialPipeline"], res) + resultRes.Condition = flattenDeliveryPipelineCondition(c, m["condition"], res) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Suspended = dcl.FlattenBool(m["suspended"]) + + return resultRes +} + +// expandDeliveryPipelineSerialPipelineMap expands the contents of DeliveryPipelineSerialPipeline into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineMap(c *Client, f map[string]DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineSlice expands the contents of DeliveryPipelineSerialPipeline into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineSlice(c *Client, f []DeliveryPipelineSerialPipeline, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineMap flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipeline { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipeline{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipeline{} + } + + items := make(map[string]DeliveryPipelineSerialPipeline) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineSlice flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipeline { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipeline{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipeline{} + } + + items := make([]DeliveryPipelineSerialPipeline, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipeline expands an instance of DeliveryPipelineSerialPipeline into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipeline(c *Client, f *DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesSlice(c, f.Stages, res); err != nil { + return nil, fmt.Errorf("error expanding Stages into stages: %w", err) + } else if v != nil { + m["stages"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipeline flattens an instance of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipeline { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipeline{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipeline + } + r.Stages = flattenDeliveryPipelineSerialPipelineStagesSlice(c, m["stages"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesMap expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesSlice expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesSlice(c *Client, f []DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesMap flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStages { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStages{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStages{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStages) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesSlice flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStages { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStages{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStages{} + } + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStages expands an instance of DeliveryPipelineSerialPipelineStages into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStages(c *Client, f *DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TargetId; !dcl.IsEmptyValueIndirect(v) { + m["targetId"] = v + } + if v := f.Profiles; v != nil { + m["profiles"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, f.Strategy, res); err != nil { + return nil, fmt.Errorf("error expanding Strategy into strategy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["strategy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, f.DeployParameters, res); err != nil { + return nil, fmt.Errorf("error expanding DeployParameters into deployParameters: %w", err) + } else if v != nil { + m["deployParameters"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStages flattens an instance of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStages(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStages { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStages{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStages + } + r.TargetId = dcl.FlattenString(m["targetId"]) + r.Profiles = dcl.FlattenStringSlice(m["profiles"]) + r.Strategy = flattenDeliveryPipelineSerialPipelineStagesStrategy(c, m["strategy"], res) + r.DeployParameters = flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, m["deployParameters"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategy expands an instance of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, f.Standard, res); err != nil { + return nil, fmt.Errorf("error expanding Standard into standard: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["standard"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, f.Canary, res); err != nil { + return nil, fmt.Errorf("error expanding Canary into canary: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["canary"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + r.Standard = flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, m["standard"], res) + r.Canary = flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, m["canary"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandard expands an instance of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, f.Predeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Predeploy into predeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["predeploy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, f.Postdeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Postdeploy into postdeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["postdeploy"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandard flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + r.Verify = dcl.FlattenBool(m["verify"]) + r.Predeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, m["predeploy"], res) + r.Postdeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, m["postdeploy"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanarySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanarySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanary expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, f.RuntimeConfig, res); err != nil { + return nil, fmt.Errorf("error expanding RuntimeConfig into runtimeConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["runtimeConfig"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, f.CanaryDeployment, res); err != nil { + return nil, fmt.Errorf("error expanding CanaryDeployment into canaryDeployment: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["canaryDeployment"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, f.CustomCanaryDeployment, res); err != nil { + return nil, fmt.Errorf("error expanding CustomCanaryDeployment into customCanaryDeployment: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["customCanaryDeployment"] = v + } + + return m, 
nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanary flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + r.RuntimeConfig = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, m["runtimeConfig"], res) + r.CanaryDeployment = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, m["canaryDeployment"], res) + r.CustomCanaryDeployment = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, m["customCanaryDeployment"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, f.Kubernetes, res); err != nil { + return nil, fmt.Errorf("error expanding Kubernetes into kubernetes: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kubernetes"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, f.CloudRun, res); err != nil { + return nil, fmt.Errorf("error expanding CloudRun into cloudRun: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["cloudRun"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + r.Kubernetes = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, m["kubernetes"], res) + r.CloudRun = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, m["cloudRun"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, f.GatewayServiceMesh, res); err != nil { + return nil, fmt.Errorf("error expanding GatewayServiceMesh into gatewayServiceMesh: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gatewayServiceMesh"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, f.ServiceNetworking, res); err != nil { + return nil, fmt.Errorf("error expanding ServiceNetworking into serviceNetworking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serviceNetworking"] = v + } + + return m, nil +} + +// 
flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + r.GatewayServiceMesh = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, m["gatewayServiceMesh"], res) + r.ServiceNetworking = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, m["serviceNetworking"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HttpRoute; !dcl.IsEmptyValueIndirect(v) { + m["httpRoute"] = v + } + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + if v := f.Deployment; !dcl.IsEmptyValueIndirect(v) { + m["deployment"] = v + } + if v := f.RouteUpdateWaitTime; !dcl.IsEmptyValueIndirect(v) { + m["routeUpdateWaitTime"] = v + } + if v := f.StableCutbackDuration; !dcl.IsEmptyValueIndirect(v) { + m["stableCutbackDuration"] = v + } + if v := f.PodSelectorLabel; !dcl.IsEmptyValueIndirect(v) { + m["podSelectorLabel"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, f.RouteDestinations, res); err != nil { + return nil, fmt.Errorf("error expanding RouteDestinations into routeDestinations: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["routeDestinations"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + r.HttpRoute = dcl.FlattenString(m["httpRoute"]) + r.Service = dcl.FlattenString(m["service"]) + r.Deployment = dcl.FlattenString(m["deployment"]) + r.RouteUpdateWaitTime = dcl.FlattenString(m["routeUpdateWaitTime"]) + r.StableCutbackDuration = dcl.FlattenString(m["stableCutbackDuration"]) + r.PodSelectorLabel = dcl.FlattenString(m["podSelectorLabel"]) + r.RouteDestinations = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, m["routeDestinations"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DestinationIds; v != nil { + m["destinationIds"] = v + } + if v := f.PropagateService; !dcl.IsEmptyValueIndirect(v) { + m["propagateService"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + } + r.DestinationIds = dcl.FlattenStringSlice(m["destinationIds"]) + r.PropagateService = dcl.FlattenBool(m["propagateService"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + if v := f.Deployment; !dcl.IsEmptyValueIndirect(v) { + m["deployment"] = v + } + if v := f.DisablePodOverprovisioning; !dcl.IsEmptyValueIndirect(v) { + m["disablePodOverprovisioning"] = v + } + if v := f.PodSelectorLabel; !dcl.IsEmptyValueIndirect(v) { + m["podSelectorLabel"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + r.Service = dcl.FlattenString(m["service"]) + r.Deployment = dcl.FlattenString(m["deployment"]) + r.DisablePodOverprovisioning = dcl.FlattenBool(m["disablePodOverprovisioning"]) + r.PodSelectorLabel = dcl.FlattenString(m["podSelectorLabel"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap expands the contents of 
DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AutomaticTrafficControl; !dcl.IsEmptyValueIndirect(v) { + m["automaticTrafficControl"] = v + } + if v := f.CanaryRevisionTags; v != nil { + m["canaryRevisionTags"] = v + } + if v := f.PriorRevisionTags; v != nil { + m["priorRevisionTags"] = v + } + if v := f.StableRevisionTags; v != nil { + m["stableRevisionTags"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + r.AutomaticTrafficControl = dcl.FlattenBool(m["automaticTrafficControl"]) + r.CanaryRevisionTags = dcl.FlattenStringSlice(m["canaryRevisionTags"]) + r.PriorRevisionTags = dcl.FlattenStringSlice(m["priorRevisionTags"]) + r.StableRevisionTags = dcl.FlattenStringSlice(m["stableRevisionTags"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Percentages; v != nil { + m["percentages"] = v + } + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, f.Predeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Predeploy into predeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["predeploy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, f.Postdeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Postdeploy into postdeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["postdeploy"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } + r.Percentages = dcl.FlattenIntSlice(m["percentages"]) + r.Verify = dcl.FlattenBool(m["verify"]) + r.Predeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, m["predeploy"], res) + r.Postdeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, m["postdeploy"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, f.PhaseConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding PhaseConfigs into phaseConfigs: %w", err) + } else if v != nil { + m["phaseConfigs"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + r.PhaseConfigs = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, m["phaseConfigs"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.PhaseId; !dcl.IsEmptyValueIndirect(v) { + m["phaseId"] = v + } + if v := f.Percentage; !dcl.IsEmptyValueIndirect(v) { + m["percentage"] = v + } + if v := f.Profiles; v != nil { + m["profiles"] = v + } + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, f.Predeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Predeploy into predeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["predeploy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, f.Postdeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Postdeploy into postdeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["postdeploy"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } + r.PhaseId = dcl.FlattenString(m["phaseId"]) + r.Percentage = dcl.FlattenInteger(m["percentage"]) + r.Profiles = dcl.FlattenStringSlice(m["profiles"]) + r.Verify = dcl.FlattenBool(m["verify"]) + r.Predeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, m["predeploy"], res) + r.Postdeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, m["postdeploy"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice flattens the contents of 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesDeployParametersMap expands the contents of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesDeployParametersMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesDeployParameters(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice expands the contents of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesDeployParameters(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesDeployParametersMap flattens the contents of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesDeployParametersMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesDeployParameters { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesDeployParameters) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice flattens the contents of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesDeployParameters { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesDeployParameters expands an instance of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, f *DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Values; !dcl.IsEmptyValueIndirect(v) { + m["values"] = v + } + if v := f.MatchTargetLabels; !dcl.IsEmptyValueIndirect(v) { + m["matchTargetLabels"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesDeployParameters flattens an instance of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesDeployParameters { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesDeployParameters{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } + r.Values = dcl.FlattenKeyValuePairs(m["values"]) + r.MatchTargetLabels = dcl.FlattenKeyValuePairs(m["matchTargetLabels"]) + + return r +} + +// expandDeliveryPipelineConditionMap expands the contents of DeliveryPipelineCondition into a JSON +// request object. +func expandDeliveryPipelineConditionMap(c *Client, f map[string]DeliveryPipelineCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineCondition(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineConditionSlice expands the contents of DeliveryPipelineCondition into a JSON +// request object. +func expandDeliveryPipelineConditionSlice(c *Client, f []DeliveryPipelineCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineCondition(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineConditionMap flattens the contents of DeliveryPipelineCondition from a JSON +// response object. 
+func flattenDeliveryPipelineConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineCondition { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineCondition{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineCondition{} + } + + items := make(map[string]DeliveryPipelineCondition) + for k, item := range a { + items[k] = *flattenDeliveryPipelineCondition(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineConditionSlice flattens the contents of DeliveryPipelineCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineCondition { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineCondition{} + } + + if len(a) == 0 { + return []DeliveryPipelineCondition{} + } + + items := make([]DeliveryPipelineCondition, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineCondition(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineCondition expands an instance of DeliveryPipelineCondition into a JSON +// request object. 
+func expandDeliveryPipelineCondition(c *Client, f *DeliveryPipelineCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineConditionPipelineReadyCondition(c, f.PipelineReadyCondition, res); err != nil { + return nil, fmt.Errorf("error expanding PipelineReadyCondition into pipelineReadyCondition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["pipelineReadyCondition"] = v + } + if v, err := expandDeliveryPipelineConditionTargetsPresentCondition(c, f.TargetsPresentCondition, res); err != nil { + return nil, fmt.Errorf("error expanding TargetsPresentCondition into targetsPresentCondition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["targetsPresentCondition"] = v + } + if v, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, f.TargetsTypeCondition, res); err != nil { + return nil, fmt.Errorf("error expanding TargetsTypeCondition into targetsTypeCondition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["targetsTypeCondition"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineCondition flattens an instance of DeliveryPipelineCondition from a JSON +// response object. 
+func flattenDeliveryPipelineCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineCondition { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineCondition{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineCondition + } + r.PipelineReadyCondition = flattenDeliveryPipelineConditionPipelineReadyCondition(c, m["pipelineReadyCondition"], res) + r.TargetsPresentCondition = flattenDeliveryPipelineConditionTargetsPresentCondition(c, m["targetsPresentCondition"], res) + r.TargetsTypeCondition = flattenDeliveryPipelineConditionTargetsTypeCondition(c, m["targetsTypeCondition"], res) + + return r +} + +// expandDeliveryPipelineConditionPipelineReadyConditionMap expands the contents of DeliveryPipelineConditionPipelineReadyCondition into a JSON +// request object. +func expandDeliveryPipelineConditionPipelineReadyConditionMap(c *Client, f map[string]DeliveryPipelineConditionPipelineReadyCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineConditionPipelineReadyCondition(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineConditionPipelineReadyConditionSlice expands the contents of DeliveryPipelineConditionPipelineReadyCondition into a JSON +// request object. 
+func expandDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, f []DeliveryPipelineConditionPipelineReadyCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineConditionPipelineReadyCondition(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineConditionPipelineReadyConditionMap flattens the contents of DeliveryPipelineConditionPipelineReadyCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionPipelineReadyConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineConditionPipelineReadyCondition { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineConditionPipelineReadyCondition{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineConditionPipelineReadyCondition{} + } + + items := make(map[string]DeliveryPipelineConditionPipelineReadyCondition) + for k, item := range a { + items[k] = *flattenDeliveryPipelineConditionPipelineReadyCondition(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineConditionPipelineReadyConditionSlice flattens the contents of DeliveryPipelineConditionPipelineReadyCondition from a JSON +// response object. 
+func flattenDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineConditionPipelineReadyCondition { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineConditionPipelineReadyCondition{} + } + + if len(a) == 0 { + return []DeliveryPipelineConditionPipelineReadyCondition{} + } + + items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineConditionPipelineReadyCondition(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineConditionPipelineReadyCondition expands an instance of DeliveryPipelineConditionPipelineReadyCondition into a JSON +// request object. +func expandDeliveryPipelineConditionPipelineReadyCondition(c *Client, f *DeliveryPipelineConditionPipelineReadyCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Status; !dcl.IsEmptyValueIndirect(v) { + m["status"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineConditionPipelineReadyCondition flattens an instance of DeliveryPipelineConditionPipelineReadyCondition from a JSON +// response object. 
+func flattenDeliveryPipelineConditionPipelineReadyCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineConditionPipelineReadyCondition { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineConditionPipelineReadyCondition{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineConditionPipelineReadyCondition + } + r.Status = dcl.FlattenBool(m["status"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + + return r +} + +// expandDeliveryPipelineConditionTargetsPresentConditionMap expands the contents of DeliveryPipelineConditionTargetsPresentCondition into a JSON +// request object. +func expandDeliveryPipelineConditionTargetsPresentConditionMap(c *Client, f map[string]DeliveryPipelineConditionTargetsPresentCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineConditionTargetsPresentCondition(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineConditionTargetsPresentConditionSlice expands the contents of DeliveryPipelineConditionTargetsPresentCondition into a JSON +// request object. +func expandDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, f []DeliveryPipelineConditionTargetsPresentCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineConditionTargetsPresentCondition(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineConditionTargetsPresentConditionMap flattens the contents of DeliveryPipelineConditionTargetsPresentCondition from a JSON +// response object. 
+func flattenDeliveryPipelineConditionTargetsPresentConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineConditionTargetsPresentCondition { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineConditionTargetsPresentCondition{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineConditionTargetsPresentCondition{} + } + + items := make(map[string]DeliveryPipelineConditionTargetsPresentCondition) + for k, item := range a { + items[k] = *flattenDeliveryPipelineConditionTargetsPresentCondition(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineConditionTargetsPresentConditionSlice flattens the contents of DeliveryPipelineConditionTargetsPresentCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineConditionTargetsPresentCondition { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineConditionTargetsPresentCondition{} + } + + if len(a) == 0 { + return []DeliveryPipelineConditionTargetsPresentCondition{} + } + + items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineConditionTargetsPresentCondition(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineConditionTargetsPresentCondition expands an instance of DeliveryPipelineConditionTargetsPresentCondition into a JSON +// request object. 
+func expandDeliveryPipelineConditionTargetsPresentCondition(c *Client, f *DeliveryPipelineConditionTargetsPresentCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Status; !dcl.IsEmptyValueIndirect(v) { + m["status"] = v + } + if v := f.MissingTargets; v != nil { + m["missingTargets"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineConditionTargetsPresentCondition flattens an instance of DeliveryPipelineConditionTargetsPresentCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionTargetsPresentCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineConditionTargetsPresentCondition { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineConditionTargetsPresentCondition{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineConditionTargetsPresentCondition + } + r.Status = dcl.FlattenBool(m["status"]) + r.MissingTargets = dcl.FlattenStringSlice(m["missingTargets"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + + return r +} + +// expandDeliveryPipelineConditionTargetsTypeConditionMap expands the contents of DeliveryPipelineConditionTargetsTypeCondition into a JSON +// request object. 
+func expandDeliveryPipelineConditionTargetsTypeConditionMap(c *Client, f map[string]DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineConditionTargetsTypeConditionSlice expands the contents of DeliveryPipelineConditionTargetsTypeCondition into a JSON +// request object. +func expandDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, f []DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineConditionTargetsTypeConditionMap flattens the contents of DeliveryPipelineConditionTargetsTypeCondition from a JSON +// response object. 
+func flattenDeliveryPipelineConditionTargetsTypeConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineConditionTargetsTypeCondition { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineConditionTargetsTypeCondition{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineConditionTargetsTypeCondition{} + } + + items := make(map[string]DeliveryPipelineConditionTargetsTypeCondition) + for k, item := range a { + items[k] = *flattenDeliveryPipelineConditionTargetsTypeCondition(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineConditionTargetsTypeConditionSlice flattens the contents of DeliveryPipelineConditionTargetsTypeCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineConditionTargetsTypeCondition { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineConditionTargetsTypeCondition{} + } + + if len(a) == 0 { + return []DeliveryPipelineConditionTargetsTypeCondition{} + } + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineConditionTargetsTypeCondition(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineConditionTargetsTypeCondition expands an instance of DeliveryPipelineConditionTargetsTypeCondition into a JSON +// request object. 
+func expandDeliveryPipelineConditionTargetsTypeCondition(c *Client, f *DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Status; !dcl.IsEmptyValueIndirect(v) { + m["status"] = v + } + if v := f.ErrorDetails; !dcl.IsEmptyValueIndirect(v) { + m["errorDetails"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineConditionTargetsTypeCondition flattens an instance of DeliveryPipelineConditionTargetsTypeCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionTargetsTypeCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineConditionTargetsTypeCondition { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineConditionTargetsTypeCondition{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineConditionTargetsTypeCondition + } + r.Status = dcl.FlattenBool(m["status"]) + r.ErrorDetails = dcl.FlattenString(m["errorDetails"]) + + return r +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *DeliveryPipeline) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalDeliveryPipeline(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type deliveryPipelineDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp deliveryPipelineApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToDeliveryPipelineDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]deliveryPipelineDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []deliveryPipelineDiff + // For each operation name, create a deliveryPipelineDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := deliveryPipelineDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToDeliveryPipelineApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToDeliveryPipelineApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (deliveryPipelineApiOperation, error) { + switch opName { + + case "updateDeliveryPipelineUpdateDeliveryPipelineOperation": + return &updateDeliveryPipelineUpdateDeliveryPipelineOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractDeliveryPipelineFields(r *DeliveryPipeline) error { + vSerialPipeline := r.SerialPipeline + if vSerialPipeline == nil { + // note: explicitly not the empty object. + vSerialPipeline = &DeliveryPipelineSerialPipeline{} + } + if err := extractDeliveryPipelineSerialPipelineFields(r, vSerialPipeline); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSerialPipeline) { + r.SerialPipeline = vSerialPipeline + } + vCondition := r.Condition + if vCondition == nil { + // note: explicitly not the empty object. 
+ vCondition = &DeliveryPipelineCondition{} + } + if err := extractDeliveryPipelineConditionFields(r, vCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCondition) { + r.Condition = vCondition + } + return nil +} +func extractDeliveryPipelineSerialPipelineFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipeline) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStages) error { + vStrategy := o.Strategy + if vStrategy == nil { + // note: explicitly not the empty object. + vStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyFields(r, vStrategy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStrategy) { + o.Strategy = vStrategy + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategy) error { + vStandard := o.Standard + if vStandard == nil { + // note: explicitly not the empty object. + vStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r, vStandard); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStandard) { + o.Standard = vStandard + } + vCanary := o.Canary + if vCanary == nil { + // note: explicitly not the empty object. + vCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r, vCanary); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanary) { + o.Canary = vCanary + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandard) error { + vPredeploy := o.Predeploy + if vPredeploy == nil { + // note: explicitly not the empty object. 
+ vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. + vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanary) error { + vRuntimeConfig := o.RuntimeConfig + if vRuntimeConfig == nil { + // note: explicitly not the empty object. + vRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r, vRuntimeConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRuntimeConfig) { + o.RuntimeConfig = vRuntimeConfig + } + vCanaryDeployment := o.CanaryDeployment + if vCanaryDeployment == nil { + // note: explicitly not the empty object. 
+ vCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r, vCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanaryDeployment) { + o.CanaryDeployment = vCanaryDeployment + } + vCustomCanaryDeployment := o.CustomCanaryDeployment + if vCustomCanaryDeployment == nil { + // note: explicitly not the empty object. + vCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r, vCustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomCanaryDeployment) { + o.CustomCanaryDeployment = vCustomCanaryDeployment + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) error { + vKubernetes := o.Kubernetes + if vKubernetes == nil { + // note: explicitly not the empty object. + vKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r, vKubernetes); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetes) { + o.Kubernetes = vKubernetes + } + vCloudRun := o.CloudRun + if vCloudRun == nil { + // note: explicitly not the empty object. 
+ vCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r, vCloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCloudRun) { + o.CloudRun = vCloudRun + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) error { + vGatewayServiceMesh := o.GatewayServiceMesh + if vGatewayServiceMesh == nil { + // note: explicitly not the empty object. + vGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r, vGatewayServiceMesh); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGatewayServiceMesh) { + o.GatewayServiceMesh = vGatewayServiceMesh + } + vServiceNetworking := o.ServiceNetworking + if vServiceNetworking == nil { + // note: explicitly not the empty object. + vServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r, vServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceNetworking) { + o.ServiceNetworking = vServiceNetworking + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) error { + vRouteDestinations := o.RouteDestinations + if vRouteDestinations == nil { + // note: explicitly not the empty object. 
+ vRouteDestinations = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r, vRouteDestinations); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRouteDestinations) { + o.RouteDestinations = vRouteDestinations + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) error { + vPredeploy := o.Predeploy + if vPredeploy == nil { + // note: explicitly not the empty object. + vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. 
+ vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) error { + vPredeploy := o.Predeploy + if vPredeploy == nil { + // note: explicitly not the empty object. + vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. 
+ vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesDeployParametersFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesDeployParameters) error { + return nil +} +func extractDeliveryPipelineConditionFields(r *DeliveryPipeline, o *DeliveryPipelineCondition) error { + vPipelineReadyCondition := o.PipelineReadyCondition + if vPipelineReadyCondition == nil { + // note: explicitly not the empty object. + vPipelineReadyCondition = &DeliveryPipelineConditionPipelineReadyCondition{} + } + if err := extractDeliveryPipelineConditionPipelineReadyConditionFields(r, vPipelineReadyCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPipelineReadyCondition) { + o.PipelineReadyCondition = vPipelineReadyCondition + } + vTargetsPresentCondition := o.TargetsPresentCondition + if vTargetsPresentCondition == nil { + // note: explicitly not the empty object. 
+ vTargetsPresentCondition = &DeliveryPipelineConditionTargetsPresentCondition{} + } + if err := extractDeliveryPipelineConditionTargetsPresentConditionFields(r, vTargetsPresentCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsPresentCondition) { + o.TargetsPresentCondition = vTargetsPresentCondition + } + vTargetsTypeCondition := o.TargetsTypeCondition + if vTargetsTypeCondition == nil { + // note: explicitly not the empty object. + vTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{} + } + if err := extractDeliveryPipelineConditionTargetsTypeConditionFields(r, vTargetsTypeCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsTypeCondition) { + o.TargetsTypeCondition = vTargetsTypeCondition + } + return nil +} +func extractDeliveryPipelineConditionPipelineReadyConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionPipelineReadyCondition) error { + return nil +} +func extractDeliveryPipelineConditionTargetsPresentConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsPresentCondition) error { + return nil +} +func extractDeliveryPipelineConditionTargetsTypeConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsTypeCondition) error { + return nil +} + +func postReadExtractDeliveryPipelineFields(r *DeliveryPipeline) error { + vSerialPipeline := r.SerialPipeline + if vSerialPipeline == nil { + // note: explicitly not the empty object. + vSerialPipeline = &DeliveryPipelineSerialPipeline{} + } + if err := postReadExtractDeliveryPipelineSerialPipelineFields(r, vSerialPipeline); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSerialPipeline) { + r.SerialPipeline = vSerialPipeline + } + vCondition := r.Condition + if vCondition == nil { + // note: explicitly not the empty object. 
+ vCondition = &DeliveryPipelineCondition{} + } + if err := postReadExtractDeliveryPipelineConditionFields(r, vCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCondition) { + r.Condition = vCondition + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipeline) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStages) error { + vStrategy := o.Strategy + if vStrategy == nil { + // note: explicitly not the empty object. + vStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyFields(r, vStrategy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStrategy) { + o.Strategy = vStrategy + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategy) error { + vStandard := o.Standard + if vStandard == nil { + // note: explicitly not the empty object. + vStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r, vStandard); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStandard) { + o.Standard = vStandard + } + vCanary := o.Canary + if vCanary == nil { + // note: explicitly not the empty object. 
+ vCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r, vCanary); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanary) { + o.Canary = vCanary + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandard) error { + vPredeploy := o.Predeploy + if vPredeploy == nil { + // note: explicitly not the empty object. + vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. + vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanary) error { + vRuntimeConfig := o.RuntimeConfig + if vRuntimeConfig == nil { + // note: explicitly not the empty object. 
+ vRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r, vRuntimeConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRuntimeConfig) { + o.RuntimeConfig = vRuntimeConfig + } + vCanaryDeployment := o.CanaryDeployment + if vCanaryDeployment == nil { + // note: explicitly not the empty object. + vCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r, vCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanaryDeployment) { + o.CanaryDeployment = vCanaryDeployment + } + vCustomCanaryDeployment := o.CustomCanaryDeployment + if vCustomCanaryDeployment == nil { + // note: explicitly not the empty object. + vCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r, vCustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomCanaryDeployment) { + o.CustomCanaryDeployment = vCustomCanaryDeployment + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) error { + vKubernetes := o.Kubernetes + if vKubernetes == nil { + // note: explicitly not the empty object. 
+ vKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r, vKubernetes); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetes) { + o.Kubernetes = vKubernetes + } + vCloudRun := o.CloudRun + if vCloudRun == nil { + // note: explicitly not the empty object. + vCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r, vCloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCloudRun) { + o.CloudRun = vCloudRun + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) error { + vGatewayServiceMesh := o.GatewayServiceMesh + if vGatewayServiceMesh == nil { + // note: explicitly not the empty object. + vGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r, vGatewayServiceMesh); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGatewayServiceMesh) { + o.GatewayServiceMesh = vGatewayServiceMesh + } + vServiceNetworking := o.ServiceNetworking + if vServiceNetworking == nil { + // note: explicitly not the empty object. 
+ vServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r, vServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceNetworking) { + o.ServiceNetworking = vServiceNetworking + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) error { + vRouteDestinations := o.RouteDestinations + if vRouteDestinations == nil { + // note: explicitly not the empty object. + vRouteDestinations = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r, vRouteDestinations); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRouteDestinations) { + o.RouteDestinations = vRouteDestinations + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) error { + return nil +} +func 
postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) error { + vPredeploy := o.Predeploy + if vPredeploy == nil { + // note: explicitly not the empty object. + vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. + vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) error { + vPredeploy := o.Predeploy + if 
vPredeploy == nil { + // note: explicitly not the empty object. + vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. + vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesDeployParametersFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesDeployParameters) error { + return nil +} +func postReadExtractDeliveryPipelineConditionFields(r *DeliveryPipeline, o *DeliveryPipelineCondition) error { + vPipelineReadyCondition := o.PipelineReadyCondition + if vPipelineReadyCondition == nil { + // note: explicitly not the empty object. 
+ vPipelineReadyCondition = &DeliveryPipelineConditionPipelineReadyCondition{} + } + if err := extractDeliveryPipelineConditionPipelineReadyConditionFields(r, vPipelineReadyCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPipelineReadyCondition) { + o.PipelineReadyCondition = vPipelineReadyCondition + } + vTargetsPresentCondition := o.TargetsPresentCondition + if vTargetsPresentCondition == nil { + // note: explicitly not the empty object. + vTargetsPresentCondition = &DeliveryPipelineConditionTargetsPresentCondition{} + } + if err := extractDeliveryPipelineConditionTargetsPresentConditionFields(r, vTargetsPresentCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsPresentCondition) { + o.TargetsPresentCondition = vTargetsPresentCondition + } + vTargetsTypeCondition := o.TargetsTypeCondition + if vTargetsTypeCondition == nil { + // note: explicitly not the empty object. + vTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{} + } + if err := extractDeliveryPipelineConditionTargetsTypeConditionFields(r, vTargetsTypeCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsTypeCondition) { + o.TargetsTypeCondition = vTargetsTypeCondition + } + return nil +} +func postReadExtractDeliveryPipelineConditionPipelineReadyConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionPipelineReadyCondition) error { + return nil +} +func postReadExtractDeliveryPipelineConditionTargetsPresentConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsPresentCondition) error { + return nil +} +func postReadExtractDeliveryPipelineConditionTargetsTypeConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsTypeCondition) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/clouddeploy/provider_dcl_client_creation.go new file mode 100644 index 
000000000000..f42c684b2f3c --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package clouddeploy + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLClouddeployClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ClouddeployBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go new file mode 100644 index 000000000000..68c132f985b1 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go @@ -0,0 +1,1789 @@ +package clouddeploy + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceClouddeployDeliveryPipeline() *schema.Resource { + return &schema.Resource{ + Create: resourceClouddeployDeliveryPipelineCreate, + Read: resourceClouddeployDeliveryPipelineRead, + Update: resourceClouddeployDeliveryPipelineUpdate, + Delete: resourceClouddeployDeliveryPipelineDelete, + + Importer: &schema.ResourceImporter{ + State: resourceClouddeployDeliveryPipelineImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the `DeliveryPipeline`. 
Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Description of the `DeliveryPipeline`. Max length is 255 characters.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "serial_pipeline": { + Type: schema.TypeList, + Optional: true, + Description: "SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineSchema(), + }, + + "suspended": { + Type: schema.TypeBool, + Optional: true, + Description: "When suspended, no new releases or rollouts can be created, but in-progress ones will complete.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "condition": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Information around the state of the Delivery Pipeline.", + Elem: ClouddeployDeliveryPipelineConditionSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time at which the pipeline was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Unique identifier of the `DeliveryPipeline`.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Most recent time at which the pipeline was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stages": { + Type: schema.TypeList, + Optional: true, + Description: "Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deploy_parameters": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The deploy parameters to use for the target in this stage.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersSchema(), + }, + + "profiles": { + Type: schema.TypeList, + Optional: true, + Description: "Skaffold profiles to use when rendering the manifest for this stage's `Target`.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "strategy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The strategy to use for a `Rollout` to this stage.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema(), + }, + + "target_id": { + Type: schema.TypeString, + Optional: true, + Description: "The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). 
The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeMap, + Required: true, + Description: "Required. Values are deploy parameters in key-value pairs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "match_target_labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Deploy parameters are applied to targets with match labels. If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "canary": { + Type: schema.TypeList, + Optional: true, + Description: "Canary deployment strategy provides progressive percentage based deployments to a Target.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanarySchema(), + }, + + "standard": { + Type: schema.TypeList, + Optional: true, + Description: "Standard deployment strategy executes a single deploy and allows verifying the deployment.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanarySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "canary_deployment": { + Type: schema.TypeList, + Optional: true, + Description: "Configures the progressive based deployment for a Target.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSchema(), + }, + + "custom_canary_deployment": { + Type: 
schema.TypeList, + Optional: true, + Description: "Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSchema(), + }, + + "runtime_config": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percentages": { + Type: schema.TypeList, + Required: true, + Description: "Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "postdeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the postdeploy job of the last phase. If this is not configured, postdeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySchema(), + }, + + "predeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the predeploy job of the first phase. 
If this is not configured, predeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySchema(), + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to run verify tests after each percentage deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "phase_configs": { + Type: schema.TypeList, + Required: true, + Description: "Required. Configuration for each phase in the canary deployment in the order executed.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percentage": { + Type: schema.TypeInt, + Required: true, + Description: "Required. 
Percentage deployment for the phase.", + }, + + "phase_id": { + Type: schema.TypeString, + Required: true, + Description: "Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + }, + + "postdeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the postdeploy job of this phase. If this is not configured, postdeploy job will not be present for this phase.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySchema(), + }, + + "predeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the predeploy job of this phase. If this is not configured, predeploy job will not be present for this phase.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySchema(), + }, + + "profiles": { + Type: schema.TypeList, + Optional: true, + Description: "Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to run verify tests after the deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. 
A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_run": { + Type: schema.TypeList, + Optional: true, + Description: "Cloud Run runtime configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSchema(), + }, + + "kubernetes": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes runtime configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automatic_traffic_control": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments.", + }, + + "canary_revision_tags": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. 
A list of tags that are added to the canary revision while the canary phase is in progress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "prior_revision_tags": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A list of tags that are added to the prior revision while the canary phase is in progress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "stable_revision_tags": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A list of tags that are added to the final stable revision when the stable phase is applied.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gateway_service_mesh": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes Gateway API service mesh configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSchema(), + }, + + "service_networking": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes Service networking configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service.", + }, + + "http_route": { + Type: schema.TypeString, + Required: true, + Description: "Required. 
Name of the Gateway API HTTPRoute.", + }, + + "service": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Service.", + }, + + "pod_selector_label": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources.", + }, + + "route_destinations": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSchema(), + }, + + "route_update_wait_time": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The time to wait for route updates to propagate. The maximum configurable time is 3 hours, in seconds format. If unspecified, there is no wait time.", + }, + + "stable_cutback_duration": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The amount of time to migrate traffic back from the canary Service to the original Service during the stable phase deployment. If specified, must be between 15s and 3600s. If unspecified, there is no cutback time.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_ids": { + Type: schema.TypeList, + Required: true, + Description: "Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. 
Valid entries include the associated entities IDs configured in the Target resource and \"@self\" to include the Target cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "propagate_service": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitiate successful DNS lookup in the route destination clusters. Can only be set to true if destinations are specified.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service.", + }, + + "service": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Service.", + }, + + "disable_pod_overprovisioning": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", + }, + + "pod_selector_label": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The label to use when selecting Pods for the Deployment resource. This label must already be present in the Deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "postdeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the postdeploy job. 
If this is not configured, postdeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySchema(), + }, + + "predeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the predeploy job. If this is not configured, predeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySchema(), + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to verify a deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. 
A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pipeline_ready_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details around the Pipeline's overall status.", + Elem: ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema(), + }, + + "targets_present_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details around targets enumerated in the pipeline.", + Elem: ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema(), + }, + + "targets_type_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details on the whether the targets enumerated in the pipeline are of the same type.", + Elem: ClouddeployDeliveryPipelineConditionTargetsTypeConditionSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last time the condition was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "missing_targets": { + Type: schema.TypeList, + Computed: true, + Description: "The list of Target names that are missing. 
For example, projects/{project_id}/locations/{location_name}/targets/{target_name}.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if there aren't any missing Targets.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last time the condition was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionTargetsTypeConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_details": { + Type: schema.TypeString, + Computed: true, + Description: "Human readable error message.", + }, + + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if the targets are all a comparable type. For example this is true if all targets are GKE clusters. This is false if some targets are Cloud Run targets and others are GKE clusters.", + }, + }, + } +} + +func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == 
nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished creating DeliveryPipeline %q: %#v", d.Id(), res) + + return resourceClouddeployDeliveryPipelineRead(d, meta) +} + +func resourceClouddeployDeliveryPipelineRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetDeliveryPipeline(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ClouddeployDeliveryPipeline %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("serial_pipeline", flattenClouddeployDeliveryPipelineSerialPipeline(res.SerialPipeline)); err != nil { + return fmt.Errorf("error setting serial_pipeline in state: %s", err) + } + if err = d.Set("suspended", res.Suspended); err != nil { + return fmt.Errorf("error setting suspended in state: %s", err) + } + if err = d.Set("annotations", flattenClouddeployDeliveryPipelineAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("condition", flattenClouddeployDeliveryPipelineCondition(res.Condition)); err != nil { + return 
fmt.Errorf("error setting condition in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("labels", flattenClouddeployDeliveryPipelineLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("terraform_labels", flattenClouddeployDeliveryPipelineTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceClouddeployDeliveryPipelineUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } 
+ client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished creating DeliveryPipeline %q: %#v", d.Id(), res) + + return resourceClouddeployDeliveryPipelineRead(d, meta) +} + +func resourceClouddeployDeliveryPipelineDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + log.Printf("[DEBUG] Deleting DeliveryPipeline %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteDeliveryPipeline(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished deleting DeliveryPipeline %q", d.Id()) + return nil +} + +func resourceClouddeployDeliveryPipelineImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/deliveryPipelines/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandClouddeployDeliveryPipelineSerialPipeline(o interface{}) *DeliveryPipelineSerialPipeline { + if o == nil { + return EmptyDeliveryPipelineSerialPipeline + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipeline + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipeline{ + Stages: expandClouddeployDeliveryPipelineSerialPipelineStagesArray(obj["stages"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipeline(obj *DeliveryPipelineSerialPipeline) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "stages": flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(obj.Stages), + } + + return 
[]interface{}{transformed} + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesArray(o interface{}) []DeliveryPipelineSerialPipelineStages { + if o == nil { + return make([]DeliveryPipelineSerialPipelineStages, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]DeliveryPipelineSerialPipelineStages, 0) + } + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStages(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStages(o interface{}) *DeliveryPipelineSerialPipelineStages { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStages + } + + obj := o.(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStages{ + DeployParameters: expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(obj["deploy_parameters"]), + Profiles: tpgdclresource.ExpandStringArray(obj["profiles"]), + Strategy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj["strategy"]), + TargetId: dcl.String(obj["target_id"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(objs []DeliveryPipelineSerialPipelineStages) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStages(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStages(obj *DeliveryPipelineSerialPipelineStages) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deploy_parameters": flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(obj.DeployParameters), + "profiles": obj.Profiles, + "strategy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj.Strategy), + 
"target_id": obj.TargetId, + } + + return transformed + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(o interface{}) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if o == nil { + return make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0) + } + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(o interface{}) *DeliveryPipelineSerialPipelineStagesDeployParameters { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } + + obj := o.(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesDeployParameters{ + Values: tpgresource.CheckStringMap(obj["values"]), + MatchTargetLabels: tpgresource.CheckStringMap(obj["match_target_labels"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(objs []DeliveryPipelineSerialPipelineStagesDeployParameters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(obj *DeliveryPipelineSerialPipelineStagesDeployParameters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "values": obj.Values, + "match_target_labels": obj.MatchTargetLabels, + } + + return transformed + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(o 
interface{}) *DeliveryPipelineSerialPipelineStagesStrategy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategy{ + Canary: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj["canary"]), + Standard: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj["standard"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj *DeliveryPipelineSerialPipelineStagesStrategy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "canary": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj.Canary), + "standard": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj.Standard), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanary{ + CanaryDeployment: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj["canary_deployment"]), + CustomCanaryDeployment: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj["custom_canary_deployment"]), + RuntimeConfig: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj["runtime_config"]), + } +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj *DeliveryPipelineSerialPipelineStagesStrategyCanary) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "canary_deployment": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj.CanaryDeployment), + "custom_canary_deployment": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj.CustomCanaryDeployment), + "runtime_config": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj.RuntimeConfig), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{ + Percentages: tpgdclresource.ExpandIntegerArray(obj["percentages"]), + Postdeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(obj["postdeploy"]), + Predeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(obj["predeploy"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "percentages": obj.Percentages, + "postdeploy": 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(obj.Postdeploy), + "predeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(obj.Predeploy), + "verify": obj.Verify, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{ + Actions: 
tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{ + PhaseConfigs: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(obj["phase_configs"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "phase_configs": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(obj.PhaseConfigs), + } + + return []interface{}{transformed} + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(o interface{}) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if o == nil { + return make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0) + } + + objs := 
o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0) + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } + + obj := o.(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{ + Percentage: dcl.Int64(int64(obj["percentage"].(int))), + PhaseId: dcl.String(obj["phase_id"].(string)), + Postdeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(obj["postdeploy"]), + Predeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(obj["predeploy"]), + Profiles: tpgdclresource.ExpandStringArray(obj["profiles"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(objs []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&item) + items = append(items, i) + } + + return items +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "percentage": obj.Percentage, + "phase_id": obj.PhaseId, + "postdeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(obj.Postdeploy), + "predeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(obj.Predeploy), + "profiles": obj.Profiles, + "verify": obj.Verify, + } + + return transformed + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func 
expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{ + CloudRun: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj["cloud_run"]), + Kubernetes: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj["kubernetes"]), + } +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cloud_run": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj.CloudRun), + "kubernetes": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj.Kubernetes), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{ + AutomaticTrafficControl: dcl.Bool(obj["automatic_traffic_control"].(bool)), + CanaryRevisionTags: tpgdclresource.ExpandStringArray(obj["canary_revision_tags"]), + PriorRevisionTags: tpgdclresource.ExpandStringArray(obj["prior_revision_tags"]), + StableRevisionTags: tpgdclresource.ExpandStringArray(obj["stable_revision_tags"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "automatic_traffic_control": obj.AutomaticTrafficControl, + "canary_revision_tags": obj.CanaryRevisionTags, + "prior_revision_tags": obj.PriorRevisionTags, + "stable_revision_tags": obj.StableRevisionTags, + } + + return 
[]interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{ + GatewayServiceMesh: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj["gateway_service_mesh"]), + ServiceNetworking: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj["service_networking"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gateway_service_mesh": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj.GatewayServiceMesh), + "service_networking": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj.ServiceNetworking), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + objArr := o.([]interface{}) + if 
len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{ + Deployment: dcl.String(obj["deployment"].(string)), + HttpRoute: dcl.String(obj["http_route"].(string)), + Service: dcl.String(obj["service"].(string)), + PodSelectorLabel: dcl.String(obj["pod_selector_label"].(string)), + RouteDestinations: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(obj["route_destinations"]), + RouteUpdateWaitTime: dcl.String(obj["route_update_wait_time"].(string)), + StableCutbackDuration: dcl.String(obj["stable_cutback_duration"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deployment": obj.Deployment, + "http_route": obj.HttpRoute, + "service": obj.Service, + "pod_selector_label": obj.PodSelectorLabel, + "route_destinations": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(obj.RouteDestinations), + "route_update_wait_time": obj.RouteUpdateWaitTime, + "stable_cutback_duration": obj.StableCutbackDuration, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if o == nil { + return 
EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{ + DestinationIds: tpgdclresource.ExpandStringArray(obj["destination_ids"]), + PropagateService: dcl.Bool(obj["propagate_service"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "destination_ids": obj.DestinationIds, + "propagate_service": obj.PropagateService, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{ + Deployment: dcl.String(obj["deployment"].(string)), + Service: dcl.String(obj["service"].(string)), + DisablePodOverprovisioning: dcl.Bool(obj["disable_pod_overprovisioning"].(bool)), + 
PodSelectorLabel: dcl.String(obj["pod_selector_label"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deployment": obj.Deployment, + "service": obj.Service, + "disable_pod_overprovisioning": obj.DisablePodOverprovisioning, + "pod_selector_label": obj.PodSelectorLabel, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyStandard{ + Postdeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(obj["postdeploy"]), + Predeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(obj["predeploy"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj *DeliveryPipelineSerialPipelineStagesStrategyStandard) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "postdeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(obj.Postdeploy), + "predeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(obj.Predeploy), + "verify": obj.Verify, + } + + return []interface{}{transformed} + +} + +func 
expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineCondition(obj *DeliveryPipelineCondition) interface{} { + if obj == nil || obj.Empty() 
{ + return nil + } + transformed := map[string]interface{}{ + "pipeline_ready_condition": flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj.PipelineReadyCondition), + "targets_present_condition": flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj.TargetsPresentCondition), + "targets_type_condition": flattenClouddeployDeliveryPipelineConditionTargetsTypeCondition(obj.TargetsTypeCondition), + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj *DeliveryPipelineConditionPipelineReadyCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "status": obj.Status, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj *DeliveryPipelineConditionTargetsPresentCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "missing_targets": obj.MissingTargets, + "status": obj.Status, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionTargetsTypeCondition(obj *DeliveryPipelineConditionTargetsTypeCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "error_details": obj.ErrorDetails, + "status": obj.Status, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployDeliveryPipelineTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + 
return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployDeliveryPipelineAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl new file mode 100644 index 000000000000..ddb0a49b441b --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl @@ -0,0 +1,751 @@ +package clouddeploy_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +{{- if eq $.TargetVersionName "ga" }} +func TestAccClouddeployDeliveryPipeline_DeliveryPipeline(t *testing.T) { +{{- else }} +func TestAccClouddeployDeliveryPipeline_CanaryDeliveryPipeline(t *testing.T) { +{{- end }} + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ +{{- if ne $.TargetVersionName "ga" }} + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_CanaryDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_CanaryDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +func TestAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", 
"annotations"}, + }, + }, + }) +} +func TestAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +func TestAccClouddeployDeliveryPipeline_DeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ +{{- end }} + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_DeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", 
"annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_DeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +{{- if eq $.TargetVersionName "ga" }} + +func testAccClouddeployDeliveryPipeline_DeliveryPipeline(context map[string]interface{}) string { +{{- else }} +func TestAccClouddeployDeliveryPipeline_VerifyDeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_VerifyDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_VerifyDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func testAccClouddeployDeliveryPipeline_CanaryDeliveryPipeline(context map[string]interface{}) string { +{{- end }} + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { 
+ deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +{{- if ne $.TargetVersionName "ga" }} + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + canary { + custom_canary_deployment { + phase_configs { + percentage = 50 + phase_id = "first" + verify = true + } + + phase_configs { + percentage = 100 + phase_id = "stable" + verify = false + } + } + + runtime_config { + kubernetes { + gateway_service_mesh { + deployment = "example-deployment" + http_route = "example-http-route" + service = "example-service" + pod_selector_label = "example.com/app-name" + } + } + } + } + } + + target_id = "example-target-two" + } + + stages { + profiles = ["example-profile-four", "example-profile-five"] + + strategy { + canary { + canary_deployment { + percentages = [0, 5, 20] + verify = true + } + + runtime_config { + kubernetes { + gateway_service_mesh { + deployment = "example-deployment" + http_route = "example-http-route" + service = "example-service" + pod_selector_label = "example.com/app-name" + + route_destinations { + destination_ids = ["example-destination-id"] + propagate_service = true + } + } + } + } + } + } + + 
target_id = "example-target-three" + } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipeline(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + canary { + canary_deployment { + percentages = [25] + verify = true + } + + runtime_config { + kubernetes { + service_networking { + deployment = "example-deployment" + service = "example-service" + pod_selector_label = "example.com/app-name" + } + } + } + } + } + + target_id = "example-target-two" 
+ } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipeline(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + canary { + canary_deployment { + percentages = [25] + verify = true + } + + runtime_config { + cloud_run { + automatic_traffic_control = true + } + } + } + } + + target_id = "example-target-two" + } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + 
my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_DeliveryPipeline(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +{{- end }} +} + + +`, context) +} + +func testAccClouddeployDeliveryPipeline_DeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + target_id = "example-target-two" + } + + stages { + profiles = ["example-profile-four", "example-profile-five"] + target_id = "example-target-three" + } + } + + suspended = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + +{{- if ne $.TargetVersionName "ga" }} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_VerifyDeliveryPipeline(context 
map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_VerifyDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + standard { + verify = true + } + } + + target_id = "example-target-two" + } + + stages { + profiles = ["example-profile-four", "example-profile-five"] + target_id = "example-target-three" + } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} +{{- end }} + +`, context) +} + +func testAccCheckClouddeployDeliveryPipelineDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != 
"rs.google_clouddeploy_delivery_pipeline" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &clouddeploy.DeliveryPipeline{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + Suspended: dcl.Bool(rs.Primary.Attributes["suspended"] == "true"), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := clouddeploy.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetDeliveryPipeline(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_clouddeploy_delivery_pipeline still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml index 0ec222248960..71de656bc1a2 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_clouddeploy_delivery_pipeline' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'clouddeploy.googleapis.com' api_version: 'v1' api_resource_type_kind: 'DeliveryPipeline' diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go 
b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go new file mode 100644 index 000000000000..8cfa22a46176 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go @@ -0,0 +1,53 @@ +package clouddeploy + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("ClouddeployDeliveryPipeline", testSweepClouddeployDeliveryPipeline) +} + +func testSweepClouddeployDeliveryPipeline(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ClouddeployDeliveryPipeline") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLClouddeployClient(config, config.UserAgent, "", 0) + err = client.DeleteAllDeliveryPipeline(context.Background(), d["project"], d["location"], isDeletableClouddeployDeliveryPipeline) + if err != nil { + return err + } + return nil +} + +func isDeletableClouddeployDeliveryPipeline(r *DeliveryPipeline) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target.go new file mode 100644 index 000000000000..5192f27461eb --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target.go @@ -0,0 +1,1154 @@ +package clouddeploy + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceClouddeployTarget() *schema.Resource { + return &schema.Resource{ + Create: resourceClouddeployTargetCreate, + Read: resourceClouddeployTargetRead, + Update: resourceClouddeployTargetUpdate, + Delete: resourceClouddeployTargetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceClouddeployTargetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + 
tpgresource.SetLabelsDiff, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the `Target`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + }, + + "anthos_cluster": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying an Anthos Cluster.", + MaxItems: 1, + Elem: ClouddeployTargetAnthosClusterSchema(), + ConflictsWith: []string{"gke", "run", "multi_target", "custom_target"}, + }, + + "associated_entities": { + Type: schema.TypeSet, + Optional: true, + Description: "Optional. Map of entity IDs to their associated entities. Associated entities allows specifying places other than the deployment target for specific features. For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + Elem: ClouddeployTargetAssociatedEntitiesSchema(), + Set: schema.HashResource(ClouddeployTargetAssociatedEntitiesSchema()), + }, + + "custom_target": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Information specifying a Custom Target.", + MaxItems: 1, + Elem: ClouddeployTargetCustomTargetSchema(), + ConflictsWith: []string{"gke", "anthos_cluster", "run", "multi_target"}, + }, + + "deploy_parameters": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. 
The deploy parameters to use for this target.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the `Target`. Max length is 255 characters.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "execution_configs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. 
When no configurations are specified, execution will use the default specified in `DefaultPool`.", + Elem: ClouddeployTargetExecutionConfigsSchema(), + }, + + "gke": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a GKE Cluster.", + MaxItems: 1, + Elem: ClouddeployTargetGkeSchema(), + ConflictsWith: []string{"anthos_cluster", "run", "multi_target", "custom_target"}, + }, + + "multi_target": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a multiTarget.", + MaxItems: 1, + Elem: ClouddeployTargetMultiTargetSchema(), + ConflictsWith: []string{"gke", "anthos_cluster", "run", "custom_target"}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "require_approval": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether or not the `Target` requires approval.", + }, + + "run": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a Cloud Run deployment target.", + MaxItems: 1, + Elem: ClouddeployTargetRunSchema(), + ConflictsWith: []string{"gke", "anthos_cluster", "multi_target", "custom_target"}, + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Time at which the `Target` was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "target_id": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Resource id of the `Target`.", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Unique identifier of the `Target`.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Most recent time at which the `Target` was updated.", + }, + }, + } +} + +func ClouddeployTargetAnthosClusterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "membership": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + }, + }, + } +} + +func ClouddeployTargetAssociatedEntitiesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "entity_id": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "anthos_clusters": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Information specifying Anthos clusters as associated entities.", + Elem: ClouddeployTargetAssociatedEntitiesAnthosClustersSchema(), + }, + + "gke_clusters": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Information specifying GKE clusters as associated entities.", + Elem: ClouddeployTargetAssociatedEntitiesGkeClustersSchema(), + }, + }, + } +} + +func ClouddeployTargetAssociatedEntitiesAnthosClustersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "membership": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. 
Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + }, + }, + } +} + +func ClouddeployTargetAssociatedEntitiesGkeClustersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", + }, + + "internal_ip": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", + }, + + "proxy_url": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, + }, + } +} + +func ClouddeployTargetCustomTargetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "custom_target_type": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`.", + }, + }, + } +} + +func ClouddeployTargetExecutionConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "usages": { + Type: schema.TypeList, + Required: true, + Description: "Required. 
Usages when this configuration should be applied.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "artifact_storage": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket (\"gs://my-bucket\") or a path within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default bucket located in the same region will be used.", + }, + + "execution_timeout": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used.", + }, + + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Google service account to use for execution. If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used.", + }, + + "verbose": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If true, additional logging will be enabled when running builds in this execution environment.", + }, + + "worker_pool": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The resource name of the `WorkerPool`, with the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. If this optional field is unspecified, the default Cloud Build pool will be used.", + }, + }, + } +} + +func ClouddeployTargetGkeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Information specifying a GKE Cluster. 
Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.", + }, + + "dns_endpoint": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If set, the cluster will be accessed using the DNS endpoint. Note that both `dns_endpoint` and `internal_ip` cannot be set to true.", + }, + + "internal_ip": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", + }, + + "proxy_url": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, + }, + } +} + +func ClouddeployTargetMultiTargetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_ids": { + Type: schema.TypeList, + Required: true, + Description: "Required. The target_ids of this multiTarget.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployTargetRunSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + Description: "Required. The location where the Cloud Run Service should be located. 
Format is `projects/{project}/locations/{location}`.", + }, + }, + } +} + +func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could 
not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyTarget(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Target: %s", err) + } + + log.Printf("[DEBUG] Finished creating Target %q: %#v", d.Id(), res) + + return resourceClouddeployTargetRead(d, meta) +} + +func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, 
err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetTarget(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ClouddeployTarget %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("anthos_cluster", flattenClouddeployTargetAnthosCluster(res.AnthosCluster)); err != nil { + return fmt.Errorf("error setting anthos_cluster in state: %s", err) + } + if err = d.Set("associated_entities", flattenClouddeployTargetAssociatedEntitiesMap(res.AssociatedEntities)); err != nil { + return fmt.Errorf("error setting associated_entities in state: %s", err) + } + if err = d.Set("custom_target", flattenClouddeployTargetCustomTarget(res.CustomTarget)); err != nil { + return fmt.Errorf("error setting custom_target in state: %s", err) + } + if err = d.Set("deploy_parameters", res.DeployParameters); err != nil { + return fmt.Errorf("error setting deploy_parameters in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", 
err) + } + if err = d.Set("execution_configs", flattenClouddeployTargetExecutionConfigsArray(res.ExecutionConfigs)); err != nil { + return fmt.Errorf("error setting execution_configs in state: %s", err) + } + if err = d.Set("gke", flattenClouddeployTargetGke(res.Gke)); err != nil { + return fmt.Errorf("error setting gke in state: %s", err) + } + if err = d.Set("multi_target", flattenClouddeployTargetMultiTarget(res.MultiTarget)); err != nil { + return fmt.Errorf("error setting multi_target in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("require_approval", res.RequireApproval); err != nil { + return fmt.Errorf("error setting require_approval in state: %s", err) + } + if err = d.Set("run", flattenClouddeployTargetRun(res.Run)); err != nil { + return fmt.Errorf("error setting run in state: %s", err) + } + if err = d.Set("annotations", flattenClouddeployTargetAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("labels", flattenClouddeployTargetLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("target_id", res.TargetId); err != nil { + return fmt.Errorf("error setting target_id in state: %s", err) + } + if err = d.Set("terraform_labels", flattenClouddeployTargetTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return 
fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceClouddeployTargetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := 
client.ApplyTarget(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Target: %s", err) + } + + log.Printf("[DEBUG] Finished creating Target %q: %#v", d.Id(), res) + + return resourceClouddeployTargetRead(d, meta) +} + +func resourceClouddeployTargetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + + log.Printf("[DEBUG] Deleting Target %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == 
nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteTarget(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Target: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Target %q", d.Id()) + return nil +} + +func resourceClouddeployTargetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/targets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/targets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandClouddeployTargetAnthosCluster(o interface{}) *TargetAnthosCluster { + if o == nil { + return EmptyTargetAnthosCluster + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetAnthosCluster + } + obj := objArr[0].(map[string]interface{}) + return &TargetAnthosCluster{ + Membership: dcl.String(obj["membership"].(string)), + } +} + +func flattenClouddeployTargetAnthosCluster(obj *TargetAnthosCluster) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandClouddeployTargetAssociatedEntitiesMap(o interface{}) map[string]TargetAssociatedEntities { 
+ if o == nil { + return make(map[string]TargetAssociatedEntities) + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make(map[string]TargetAssociatedEntities) + } + + items := make(map[string]TargetAssociatedEntities) + for _, item := range objs { + i := expandClouddeployTargetAssociatedEntities(item) + if item != nil { + items[item.(map[string]interface{})["entity_id"].(string)] = *i + } + } + + return items +} + +func expandClouddeployTargetAssociatedEntities(o interface{}) *TargetAssociatedEntities { + if o == nil { + return EmptyTargetAssociatedEntities + } + + obj := o.(map[string]interface{}) + return &TargetAssociatedEntities{ + AnthosClusters: expandClouddeployTargetAssociatedEntitiesAnthosClustersArray(obj["anthos_clusters"]), + GkeClusters: expandClouddeployTargetAssociatedEntitiesGkeClustersArray(obj["gke_clusters"]), + } +} + +func flattenClouddeployTargetAssociatedEntitiesMap(objs map[string]TargetAssociatedEntities) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenClouddeployTargetAssociatedEntities(&item, name) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetAssociatedEntities(obj *TargetAssociatedEntities, name string) interface{} { + if obj == nil { + return nil + } + transformed := map[string]interface{}{ + "anthos_clusters": flattenClouddeployTargetAssociatedEntitiesAnthosClustersArray(obj.AnthosClusters), + "gke_clusters": flattenClouddeployTargetAssociatedEntitiesGkeClustersArray(obj.GkeClusters), + } + + transformed["entity_id"] = name + + return transformed + +} +func expandClouddeployTargetAssociatedEntitiesAnthosClustersArray(o interface{}) []TargetAssociatedEntitiesAnthosClusters { + if o == nil { + return make([]TargetAssociatedEntitiesAnthosClusters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return 
make([]TargetAssociatedEntitiesAnthosClusters, 0) + } + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployTargetAssociatedEntitiesAnthosClusters(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployTargetAssociatedEntitiesAnthosClusters(o interface{}) *TargetAssociatedEntitiesAnthosClusters { + if o == nil { + return EmptyTargetAssociatedEntitiesAnthosClusters + } + + obj := o.(map[string]interface{}) + return &TargetAssociatedEntitiesAnthosClusters{ + Membership: dcl.String(obj["membership"].(string)), + } +} + +func flattenClouddeployTargetAssociatedEntitiesAnthosClustersArray(objs []TargetAssociatedEntitiesAnthosClusters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployTargetAssociatedEntitiesAnthosClusters(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetAssociatedEntitiesAnthosClusters(obj *TargetAssociatedEntitiesAnthosClusters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "membership": obj.Membership, + } + + return transformed + +} +func expandClouddeployTargetAssociatedEntitiesGkeClustersArray(o interface{}) []TargetAssociatedEntitiesGkeClusters { + if o == nil { + return make([]TargetAssociatedEntitiesGkeClusters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]TargetAssociatedEntitiesGkeClusters, 0) + } + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployTargetAssociatedEntitiesGkeClusters(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployTargetAssociatedEntitiesGkeClusters(o interface{}) *TargetAssociatedEntitiesGkeClusters { + if o == nil { + return EmptyTargetAssociatedEntitiesGkeClusters + } + + obj := 
o.(map[string]interface{}) + return &TargetAssociatedEntitiesGkeClusters{ + Cluster: dcl.String(obj["cluster"].(string)), + InternalIP: dcl.Bool(obj["internal_ip"].(bool)), + ProxyUrl: dcl.String(obj["proxy_url"].(string)), + } +} + +func flattenClouddeployTargetAssociatedEntitiesGkeClustersArray(objs []TargetAssociatedEntitiesGkeClusters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployTargetAssociatedEntitiesGkeClusters(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetAssociatedEntitiesGkeClusters(obj *TargetAssociatedEntitiesGkeClusters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster": obj.Cluster, + "internal_ip": obj.InternalIP, + "proxy_url": obj.ProxyUrl, + } + + return transformed + +} + +func expandClouddeployTargetCustomTarget(o interface{}) *TargetCustomTarget { + if o == nil { + return EmptyTargetCustomTarget + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetCustomTarget + } + obj := objArr[0].(map[string]interface{}) + return &TargetCustomTarget{ + CustomTargetType: dcl.String(obj["custom_target_type"].(string)), + } +} + +func flattenClouddeployTargetCustomTarget(obj *TargetCustomTarget) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "custom_target_type": obj.CustomTargetType, + } + + return []interface{}{transformed} + +} +func expandClouddeployTargetExecutionConfigsArray(o interface{}) []TargetExecutionConfigs { + if o == nil { + return nil + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := make([]TargetExecutionConfigs, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployTargetExecutionConfigs(item) + items = append(items, *i) + } + + return items +} + +func 
expandClouddeployTargetExecutionConfigs(o interface{}) *TargetExecutionConfigs { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &TargetExecutionConfigs{ + Usages: expandClouddeployTargetExecutionConfigsUsagesArray(obj["usages"]), + ArtifactStorage: dcl.StringOrNil(obj["artifact_storage"].(string)), + ExecutionTimeout: dcl.StringOrNil(obj["execution_timeout"].(string)), + ServiceAccount: dcl.StringOrNil(obj["service_account"].(string)), + Verbose: dcl.Bool(obj["verbose"].(bool)), + WorkerPool: dcl.String(obj["worker_pool"].(string)), + } +} + +func flattenClouddeployTargetExecutionConfigsArray(objs []TargetExecutionConfigs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployTargetExecutionConfigs(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetExecutionConfigs(obj *TargetExecutionConfigs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "usages": flattenClouddeployTargetExecutionConfigsUsagesArray(obj.Usages), + "artifact_storage": obj.ArtifactStorage, + "execution_timeout": obj.ExecutionTimeout, + "service_account": obj.ServiceAccount, + "verbose": obj.Verbose, + "worker_pool": obj.WorkerPool, + } + + return transformed + +} + +func expandClouddeployTargetGke(o interface{}) *TargetGke { + if o == nil { + return EmptyTargetGke + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetGke + } + obj := objArr[0].(map[string]interface{}) + return &TargetGke{ + Cluster: dcl.String(obj["cluster"].(string)), + DnsEndpoint: dcl.Bool(obj["dns_endpoint"].(bool)), + InternalIP: dcl.Bool(obj["internal_ip"].(bool)), + ProxyUrl: dcl.String(obj["proxy_url"].(string)), + } +} + +func flattenClouddeployTargetGke(obj *TargetGke) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "cluster": obj.Cluster, + "dns_endpoint": obj.DnsEndpoint, + "internal_ip": obj.InternalIP, + "proxy_url": obj.ProxyUrl, + } + + return []interface{}{transformed} + +} + +func expandClouddeployTargetMultiTarget(o interface{}) *TargetMultiTarget { + if o == nil { + return EmptyTargetMultiTarget + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetMultiTarget + } + obj := objArr[0].(map[string]interface{}) + return &TargetMultiTarget{ + TargetIds: tpgdclresource.ExpandStringArray(obj["target_ids"]), + } +} + +func flattenClouddeployTargetMultiTarget(obj *TargetMultiTarget) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "target_ids": obj.TargetIds, + } + + return []interface{}{transformed} + +} + +func expandClouddeployTargetRun(o interface{}) *TargetRun { + if o == nil { + return EmptyTargetRun + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetRun + } + obj := objArr[0].(map[string]interface{}) + return &TargetRun{ + Location: dcl.String(obj["location"].(string)), + } +} + +func flattenClouddeployTargetRun(obj *TargetRun) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location": obj.Location, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployTargetLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployTargetTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + 
transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployTargetAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployTargetExecutionConfigsUsagesArray(obj []TargetExecutionConfigsUsagesEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandClouddeployTargetExecutionConfigsUsagesArray(o interface{}) []TargetExecutionConfigsUsagesEnum { + objs := o.([]interface{}) + items := make([]TargetExecutionConfigsUsagesEnum, 0, len(objs)) + for _, item := range objs { + i := TargetExecutionConfigsUsagesEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl new file mode 100644 index 000000000000..635b54feeef3 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl @@ -0,0 +1,536 @@ +package clouddeploy_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccClouddeployTarget_Target(t *testing.T) { + t.Parallel() + + context := 
map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_Target(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate1(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate2(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate3(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func testAccClouddeployTarget_Target(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + + deploy_parameters = { + deployParameterKey = 
"deployParameterValue" + } + + description = "basic description" + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/example-cluster-name" + } + + project = "%{project_name}" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated description" + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate1(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + artifact_storage = "gs://my-bucket/my-dir" + service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" + } + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" 
+ } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated description" + + execution_configs { + usages = ["RENDER"] + artifact_storage = "gs://my-bucket/my-dir" + service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" + } + + execution_configs { + usages = ["DEPLOY"] + artifact_storage = "gs://deploy-bucket/deploy-dir" + service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" + worker_pool = "projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" + } + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate3(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated description" + + execution_configs { + usages = ["RENDER"] + artifact_storage = "gs://other-bucket/other-dir" + service_account = "other-owner@%{project_name}.iam.gserviceaccount.com" + verbose = true + } + + execution_configs { + usages = ["DEPLOY"] + artifact_storage = "gs://deploy-bucket/deploy-dir" + service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" + worker_pool = 
"projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" + } + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + proxy_url = "http://10.0.0.1" + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + + +`, context) +} + +{{- if ne $.TargetVersionName "ga" }} +func TestAccClouddeployTarget_MultiTarget(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_MultiTarget(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_MultiTargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func TestAccClouddeployTarget_RunTarget(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, 
+ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_RunTarget(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_RunTargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func testAccClouddeployTarget_MultiTarget(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "multi-target description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + multi_target { + target_ids = ["1", "2"] + } + + project = "%{project_name}" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployTarget_MultiTargetUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated mutli-target description" + + multi_target { + target_ids = ["1", "2", "3"] + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = 
"example-annotation-3" + } + + labels = { + my_second_label = "example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployTarget_RunTarget(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "basic description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + project = "%{project_name}" + require_approval = false + + run { + location = "projects/%{project_name}/locations/%{region}" + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployTarget_RunTargetUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "basic description" + project = "%{project_name}" + require_approval = true + + run { + location = "projects/%{project_name}/locations/%{region}" + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} +{{- end }} + +func testAccCheckClouddeployTargetDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_clouddeploy_target" { + continue + } + if strings.HasPrefix(name, "data.") { + 
continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &clouddeploy.Target{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + RequireApproval: dcl.Bool(rs.Primary.Attributes["require_approval"] == "true"), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + TargetId: dcl.StringOrNil(rs.Primary.Attributes["target_id"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := clouddeploy.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetTarget(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_clouddeploy_target still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml index 647944fdd0d3..edffa3158112 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_clouddeploy_target' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'clouddeploy.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Target' diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_sweeper.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_sweeper.go new file mode 100644 index 000000000000..6b7be9f302ea --- /dev/null +++ 
b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_sweeper.go @@ -0,0 +1,53 @@ +package clouddeploy + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("ClouddeployTarget", testSweepClouddeployTarget) +} + +func testSweepClouddeployTarget(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ClouddeployTarget") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLClouddeployClient(config, config.UserAgent, "", 0) + err = client.DeleteAllTarget(context.Background(), d["project"], d["location"], isDeletableClouddeployTarget) + if err != nil { + return err + } + return nil +} + +func isDeletableClouddeployTarget(r *Target) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl new file mode 100644 index 000000000000..66a10373229c --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl @@ -0,0 +1,877 @@ +package clouddeploy + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + 
"google.golang.org/api/googleapi" +) + +type Target struct { + Name *string `json:"name"` + TargetId *string `json:"targetId"` + Uid *string `json:"uid"` + Description *string `json:"description"` + Annotations map[string]string `json:"annotations"` + Labels map[string]string `json:"labels"` + RequireApproval *bool `json:"requireApproval"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Gke *TargetGke `json:"gke"` + AnthosCluster *TargetAnthosCluster `json:"anthosCluster"` + Etag *string `json:"etag"` + ExecutionConfigs []TargetExecutionConfigs `json:"executionConfigs"` + Project *string `json:"project"` + Location *string `json:"location"` + Run *TargetRun `json:"run"` + MultiTarget *TargetMultiTarget `json:"multiTarget"` + DeployParameters map[string]string `json:"deployParameters"` + CustomTarget *TargetCustomTarget `json:"customTarget"` + AssociatedEntities map[string]TargetAssociatedEntities `json:"associatedEntities"` +} + +func (r *Target) String() string { + return dcl.SprintResource(r) +} + +// The enum TargetExecutionConfigsUsagesEnum. +type TargetExecutionConfigsUsagesEnum string + +// TargetExecutionConfigsUsagesEnumRef returns a *TargetExecutionConfigsUsagesEnum with the value of string s +// If the empty string is provided, nil is returned. +func TargetExecutionConfigsUsagesEnumRef(s string) *TargetExecutionConfigsUsagesEnum { + v := TargetExecutionConfigsUsagesEnum(s) + return &v +} + +func (v TargetExecutionConfigsUsagesEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED", "RENDER", "DEPLOY"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "TargetExecutionConfigsUsagesEnum", + Value: string(v), + Valid: []string{}, + } +} + +type TargetGke struct { + empty bool `json:"-"` + Cluster *string `json:"cluster"` + InternalIP *bool `json:"internalIP"` + ProxyUrl *string `json:"proxyUrl"` + DnsEndpoint *bool `json:"dnsEndpoint"` +} + +type jsonTargetGke TargetGke + +func (r *TargetGke) UnmarshalJSON(data []byte) error { + var res jsonTargetGke + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetGke + } else { + + r.Cluster = res.Cluster + + r.InternalIP = res.InternalIP + + r.ProxyUrl = res.ProxyUrl + + r.DnsEndpoint = res.DnsEndpoint + + } + return nil +} + +// This object is used to assert a desired state where this TargetGke is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetGke *TargetGke = &TargetGke{empty: true} + +func (r *TargetGke) Empty() bool { + return r.empty +} + +func (r *TargetGke) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetGke) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAnthosCluster struct { + empty bool `json:"-"` + Membership *string `json:"membership"` +} + +type jsonTargetAnthosCluster TargetAnthosCluster + +func (r *TargetAnthosCluster) UnmarshalJSON(data []byte) error { + var res jsonTargetAnthosCluster + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAnthosCluster + } else { + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this TargetAnthosCluster is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAnthosCluster *TargetAnthosCluster = &TargetAnthosCluster{empty: true} + +func (r *TargetAnthosCluster) Empty() bool { + return r.empty +} + +func (r *TargetAnthosCluster) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAnthosCluster) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetExecutionConfigs struct { + empty bool `json:"-"` + Usages []TargetExecutionConfigsUsagesEnum `json:"usages"` + WorkerPool *string `json:"workerPool"` + ServiceAccount *string `json:"serviceAccount"` + ArtifactStorage *string `json:"artifactStorage"` + ExecutionTimeout *string `json:"executionTimeout"` + Verbose *bool `json:"verbose"` +} + +type jsonTargetExecutionConfigs TargetExecutionConfigs + +func (r *TargetExecutionConfigs) UnmarshalJSON(data []byte) error { + var res jsonTargetExecutionConfigs + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetExecutionConfigs + } else { + + r.Usages = res.Usages + + r.WorkerPool = res.WorkerPool + + r.ServiceAccount = res.ServiceAccount + + r.ArtifactStorage = res.ArtifactStorage + + r.ExecutionTimeout = res.ExecutionTimeout + + r.Verbose = res.Verbose + + } + return nil +} + +// This object is used to assert a desired state where this TargetExecutionConfigs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetExecutionConfigs *TargetExecutionConfigs = &TargetExecutionConfigs{empty: true} + +func (r *TargetExecutionConfigs) Empty() bool { + return r.empty +} + +func (r *TargetExecutionConfigs) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetExecutionConfigs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetRun struct { + empty bool `json:"-"` + Location *string `json:"location"` +} + +type jsonTargetRun TargetRun + +func (r *TargetRun) UnmarshalJSON(data []byte) error { + var res jsonTargetRun + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetRun + } else { + + r.Location = res.Location + + } + return nil +} + +// This object is used to assert a desired state where this TargetRun is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetRun *TargetRun = &TargetRun{empty: true} + +func (r *TargetRun) Empty() bool { + return r.empty +} + +func (r *TargetRun) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetRun) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetMultiTarget struct { + empty bool `json:"-"` + TargetIds []string `json:"targetIds"` +} + +type jsonTargetMultiTarget TargetMultiTarget + +func (r *TargetMultiTarget) UnmarshalJSON(data []byte) error { + var res jsonTargetMultiTarget + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetMultiTarget + } else { + + r.TargetIds = res.TargetIds + + } + return nil +} + +// This object is used to assert a desired state where this TargetMultiTarget is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetMultiTarget *TargetMultiTarget = &TargetMultiTarget{empty: true} + +func (r *TargetMultiTarget) Empty() bool { + return r.empty +} + +func (r *TargetMultiTarget) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetMultiTarget) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetCustomTarget struct { + empty bool `json:"-"` + CustomTargetType *string `json:"customTargetType"` +} + +type jsonTargetCustomTarget TargetCustomTarget + +func (r *TargetCustomTarget) UnmarshalJSON(data []byte) error { + var res jsonTargetCustomTarget + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetCustomTarget + } else { + + r.CustomTargetType = res.CustomTargetType + + } + return nil +} + +// This object is used to assert a desired state where this TargetCustomTarget is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetCustomTarget *TargetCustomTarget = &TargetCustomTarget{empty: true} + +func (r *TargetCustomTarget) Empty() bool { + return r.empty +} + +func (r *TargetCustomTarget) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetCustomTarget) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAssociatedEntities struct { + empty bool `json:"-"` + GkeClusters []TargetAssociatedEntitiesGkeClusters `json:"gkeClusters"` + AnthosClusters []TargetAssociatedEntitiesAnthosClusters `json:"anthosClusters"` +} + +type jsonTargetAssociatedEntities TargetAssociatedEntities + +func (r *TargetAssociatedEntities) UnmarshalJSON(data []byte) error { + var res jsonTargetAssociatedEntities + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAssociatedEntities + } else { + + r.GkeClusters = res.GkeClusters + + r.AnthosClusters = res.AnthosClusters + + } + return nil +} + +// This object is used to assert a desired state where this TargetAssociatedEntities is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAssociatedEntities *TargetAssociatedEntities = &TargetAssociatedEntities{empty: true} + +func (r *TargetAssociatedEntities) Empty() bool { + return r.empty +} + +func (r *TargetAssociatedEntities) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAssociatedEntities) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAssociatedEntitiesGkeClusters struct { + empty bool `json:"-"` + Cluster *string `json:"cluster"` + InternalIP *bool `json:"internalIP"` + ProxyUrl *string `json:"proxyUrl"` +} + +type jsonTargetAssociatedEntitiesGkeClusters TargetAssociatedEntitiesGkeClusters + +func (r *TargetAssociatedEntitiesGkeClusters) UnmarshalJSON(data []byte) error { + var res jsonTargetAssociatedEntitiesGkeClusters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAssociatedEntitiesGkeClusters + } else { + + r.Cluster = res.Cluster + + r.InternalIP = res.InternalIP + + r.ProxyUrl = res.ProxyUrl + + } + return nil +} + +// This object is used to assert a desired state where this TargetAssociatedEntitiesGkeClusters is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAssociatedEntitiesGkeClusters *TargetAssociatedEntitiesGkeClusters = &TargetAssociatedEntitiesGkeClusters{empty: true} + +func (r *TargetAssociatedEntitiesGkeClusters) Empty() bool { + return r.empty +} + +func (r *TargetAssociatedEntitiesGkeClusters) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAssociatedEntitiesGkeClusters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAssociatedEntitiesAnthosClusters struct { + empty bool `json:"-"` + Membership *string `json:"membership"` +} + +type jsonTargetAssociatedEntitiesAnthosClusters TargetAssociatedEntitiesAnthosClusters + +func (r *TargetAssociatedEntitiesAnthosClusters) UnmarshalJSON(data []byte) error { + var res jsonTargetAssociatedEntitiesAnthosClusters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAssociatedEntitiesAnthosClusters + } else { + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this TargetAssociatedEntitiesAnthosClusters is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAssociatedEntitiesAnthosClusters *TargetAssociatedEntitiesAnthosClusters = &TargetAssociatedEntitiesAnthosClusters{empty: true} + +func (r *TargetAssociatedEntitiesAnthosClusters) Empty() bool { + return r.empty +} + +func (r *TargetAssociatedEntitiesAnthosClusters) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAssociatedEntitiesAnthosClusters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Target) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "clouddeploy", + Type: "Target", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "ga", +{{- end }} + } +} + +func (r *Target) ID() (string, error) { + if err := extractTargetFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "target_id": dcl.ValueOrEmptyString(nr.TargetId), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "description": dcl.ValueOrEmptyString(nr.Description), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "require_approval": dcl.ValueOrEmptyString(nr.RequireApproval), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "gke": dcl.ValueOrEmptyString(nr.Gke), + "anthos_cluster": dcl.ValueOrEmptyString(nr.AnthosCluster), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "execution_configs": dcl.ValueOrEmptyString(nr.ExecutionConfigs), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "run": dcl.ValueOrEmptyString(nr.Run), + "multi_target": 
dcl.ValueOrEmptyString(nr.MultiTarget), + "deploy_parameters": dcl.ValueOrEmptyString(nr.DeployParameters), + "custom_target": dcl.ValueOrEmptyString(nr.CustomTarget), + "associated_entities": dcl.ValueOrEmptyString(nr.AssociatedEntities), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const TargetMaxPage = -1 + +type TargetList struct { + Items []*Target + + nextToken string + + pageSize int32 + + resource *Target +} + +func (l *TargetList) HasNext() bool { + return l.nextToken != "" +} + +func (l *TargetList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listTarget(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListTarget(ctx context.Context, project, location string) (*TargetList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListTargetWithMaxResults(ctx, project, location, TargetMaxPage) + +} + +func (c *Client) ListTargetWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*TargetList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &Target{ + Project: &project, + Location: &location, + } + items, token, err := c.listTarget(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &TargetList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetTarget(ctx context.Context, r *Target) (*Target, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractTargetFields(r) + + b, err := c.getTargetRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalTarget(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeTargetNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractTargetFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteTarget(ctx context.Context, r *Target) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Target resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Target...") + deleteOp := deleteTargetOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllTarget deletes all 
resources that the filter function returns true on. +func (c *Client) DeleteAllTarget(ctx context.Context, project, location string, filter func(*Target) bool) error { + listObj, err := c.ListTarget(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllTarget(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return err + } + err = c.deleteAllTarget(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyTarget(ctx context.Context, rawDesired *Target, opts ...dcl.ApplyOption) (*Target, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Target + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyTargetHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyTargetHelper(c *Client, ctx context.Context, rawDesired *Target, opts ...dcl.ApplyOption) (*Target, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyTarget...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractTargetFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.targetDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToTargetDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []targetApiOperation + if create { + ops = append(ops, &createTargetOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyTargetDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyTargetDiff(c *Client, ctx context.Context, desired *Target, rawDesired *Target, ops []targetApiOperation, opts ...dcl.ApplyOption) (*Target, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetTarget(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createTargetOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapTarget(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeTargetNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeTargetNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeTargetDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractTargetFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractTargetFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffTarget(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Target) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/target_internal.go b/mmv1/third_party/terraform/services/clouddeploy/target_internal.go new file mode 100644 index 000000000000..52c5afa45af9 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/target_internal.go @@ -0,0 +1,4011 @@ +package clouddeploy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Target) validate() error { + + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Gke", "AnthosCluster", "Run", "MultiTarget", "CustomTarget"}, r.Gke, r.AnthosCluster, r.Run, r.MultiTarget, r.CustomTarget); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Name, "Name"); err != 
nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Gke) { + if err := r.Gke.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AnthosCluster) { + if err := r.AnthosCluster.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Run) { + if err := r.Run.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MultiTarget) { + if err := r.MultiTarget.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CustomTarget) { + if err := r.CustomTarget.validate(); err != nil { + return err + } + } + return nil +} +func (r *TargetGke) validate() error { + return nil +} +func (r *TargetAnthosCluster) validate() error { + return nil +} +func (r *TargetExecutionConfigs) validate() error { + if err := dcl.Required(r, "usages"); err != nil { + return err + } + if err := dcl.ValidateAtMostOneOfFieldsSet([]string(nil)); err != nil { + return err + } + return nil +} +func (r *TargetRun) validate() error { + if err := dcl.Required(r, "location"); err != nil { + return err + } + return nil +} +func (r *TargetMultiTarget) validate() error { + if err := dcl.Required(r, "targetIds"); err != nil { + return err + } + return nil +} +func (r *TargetCustomTarget) validate() error { + if err := dcl.Required(r, "customTargetType"); err != nil { + return err + } + return nil +} +func (r *TargetAssociatedEntities) validate() error { + return nil +} +func (r *TargetAssociatedEntitiesGkeClusters) validate() error { + return nil +} +func (r *TargetAssociatedEntitiesAnthosClusters) validate() error { + return nil +} +func (r *Target) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://clouddeploy.googleapis.com/v1/", params) +} + +func (r *Target) getURL(userBasePath string) (string, error) { 
+ nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/targets/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Target) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/targets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Target) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/targets?targetId={{name}}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Target) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/targets/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Target) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Target) SetPolicyVerb() string { + return "" +} + +func (r *Target) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Target) IAMPolicyVersion() int { + return 3 +} 
+ +// targetApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type targetApiOperation interface { + do(context.Context, *Target, *Client) error +} + +// newUpdateTargetUpdateTargetRequest creates a request for an +// Target resource's UpdateTarget update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateTargetUpdateTargetRequest(ctx context.Context, f *Target, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v := f.RequireApproval; !dcl.IsEmptyValueIndirect(v) { + req["requireApproval"] = v + } + if v, err := expandTargetGke(c, f.Gke, res); err != nil { + return nil, fmt.Errorf("error expanding Gke into gke: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["gke"] = v + } + if v, err := expandTargetAnthosCluster(c, f.AnthosCluster, res); err != nil { + return nil, fmt.Errorf("error expanding AnthosCluster into anthosCluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["anthosCluster"] = v + } + if v, err := expandTargetExecutionConfigsSlice(c, f.ExecutionConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding ExecutionConfigs into executionConfigs: %w", err) + } else if v != nil { + req["executionConfigs"] = v + } + if v, err := expandTargetRun(c, f.Run, res); err != nil { + return nil, fmt.Errorf("error expanding Run into run: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["run"] = v + } + if v, err := expandTargetMultiTarget(c, f.MultiTarget, res); err != nil { + return nil, fmt.Errorf("error expanding MultiTarget into multiTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["multiTarget"] = 
v + } + if v := f.DeployParameters; !dcl.IsEmptyValueIndirect(v) { + req["deployParameters"] = v + } + if v, err := expandTargetCustomTarget(c, f.CustomTarget, res); err != nil { + return nil, fmt.Errorf("error expanding CustomTarget into customTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["customTarget"] = v + } + if v, err := expandTargetAssociatedEntitiesMap(c, f.AssociatedEntities, res); err != nil { + return nil, fmt.Errorf("error expanding AssociatedEntities into associatedEntities: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["associatedEntities"] = v + } + b, err := c.getTargetRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/targets/%s", *f.Project, *f.Location, *f.Name) + + return req, nil +} + +// marshalUpdateTargetUpdateTargetRequest converts the update into +// the final JSON request body. +func marshalUpdateTargetUpdateTargetRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateTargetUpdateTargetOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateTargetUpdateTargetOperation) do(ctx context.Context, r *Target, c *Client) error { + _, err := c.GetTarget(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateTarget") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateTargetUpdateTargetRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateTargetUpdateTargetRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listTargetRaw(ctx context.Context, r *Target, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != TargetMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listTargetOperation struct { + Targets []map[string]interface{} `json:"targets"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listTarget(ctx context.Context, r *Target, pageToken string, pageSize 
int32) ([]*Target, string, error) { + b, err := c.listTargetRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listTargetOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Target + for _, v := range m.Targets { + res, err := unmarshalMapTarget(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllTarget(ctx context.Context, f func(*Target) bool, resources []*Target) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteTarget(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteTargetOperation struct{} + +func (op *deleteTargetOperation) do(ctx context.Context, r *Target, c *Client) error { + r, err := c.GetTarget(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Target not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetTarget checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+	var o operations.StandardGCPOperation
+	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
+		return err
+	}
+	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
+		return err
+	}
+
+	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
+	// This is the reason we are adding retry to handle that case.
+	retriesRemaining := 10
+	err = dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
+		_, err := c.GetTarget(ctx, r)
+		if dcl.IsNotFound(err) {
+			return nil, nil
+		}
+		if retriesRemaining > 0 {
+			retriesRemaining--
+			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
+		}
+		return nil, dcl.NotDeletedError{ExistingResource: r}
+	}, c.Config.RetryProvider)
+	return err // surface NotDeletedError if the resource still exists after retries (was silently dropped)
+}
+
+// Create operations are similar to Update operations, although they do not have
+// specific request objects. The Create request object is the json encoding of
+// the resource, which is modified by res.marshal to form the base request body.
+type createTargetOperation struct {
+	response map[string]interface{}
+}
+
+func (op *createTargetOperation) FirstResponse() (map[string]interface{}, bool) {
+	return op.response, len(op.response) > 0
+}
+
+func (op *createTargetOperation) do(ctx context.Context, r *Target, c *Client) error {
+	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
+	u, err := r.createURL(c.Config.BasePath)
+	if err != nil {
+		return err
+	}
+
+	req, err := r.marshal(c)
+	if err != nil {
+		return err
+	}
+	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
+	if err != nil {
+		return err
+	}
+	// wait for object to be created.
+	var o operations.StandardGCPOperation
+	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
+		return err
+	}
+	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
+		c.Config.Logger.WarningWithContextf(ctx, "Creation failed after waiting for operation: %v", err)
+		return err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation")
+	op.response, _ = o.FirstResponse()
+
+	if _, err := c.GetTarget(ctx, r); err != nil {
+		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) getTargetRaw(ctx context.Context, r *Target) ([]byte, error) {
+
+	u, err := r.getURL(c.Config.BasePath)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Response.Body.Close()
+	b, err := ioutil.ReadAll(resp.Response.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+func (c *Client) targetDiffsForRawDesired(ctx context.Context, rawDesired *Target, opts ...dcl.ApplyOption) (initial, desired *Target, diffs []*dcl.FieldDiff, err error) {
+	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
+	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
+ var fetchState *Target + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Target); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Target, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetTarget(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Target resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Target resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Target resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeTargetDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Target: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Target: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractTargetFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeTargetInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Target: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeTargetDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Target: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffTarget(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeTargetInitialState(rawInitial, rawDesired *Target) (*Target, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + + if !dcl.IsZeroValue(rawInitial.Gke) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.AnthosCluster, rawInitial.Run, rawInitial.MultiTarget, rawInitial.CustomTarget) { + rawInitial.Gke = EmptyTargetGke + } + } + + if !dcl.IsZeroValue(rawInitial.AnthosCluster) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.Run, rawInitial.MultiTarget, rawInitial.CustomTarget) { + rawInitial.AnthosCluster = EmptyTargetAnthosCluster + } + } + + if !dcl.IsZeroValue(rawInitial.Run) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.AnthosCluster, rawInitial.MultiTarget, rawInitial.CustomTarget) { + rawInitial.Run = EmptyTargetRun + } + } + + if !dcl.IsZeroValue(rawInitial.MultiTarget) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.AnthosCluster, rawInitial.Run, rawInitial.CustomTarget) { + rawInitial.MultiTarget = EmptyTargetMultiTarget + } + } + + if !dcl.IsZeroValue(rawInitial.CustomTarget) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.AnthosCluster, rawInitial.Run, rawInitial.MultiTarget) { + rawInitial.CustomTarget = EmptyTargetCustomTarget + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeTargetDesiredState(rawDesired, rawInitial *Target, opts ...dcl.ApplyOption) (*Target, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Gke = canonicalizeTargetGke(rawDesired.Gke, nil, opts...) 
+ rawDesired.AnthosCluster = canonicalizeTargetAnthosCluster(rawDesired.AnthosCluster, nil, opts...) + rawDesired.Run = canonicalizeTargetRun(rawDesired.Run, nil, opts...) + rawDesired.MultiTarget = canonicalizeTargetMultiTarget(rawDesired.MultiTarget, nil, opts...) + rawDesired.CustomTarget = canonicalizeTargetCustomTarget(rawDesired.CustomTarget, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Target{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.BoolCanonicalize(rawDesired.RequireApproval, rawInitial.RequireApproval) { + canonicalDesired.RequireApproval = rawInitial.RequireApproval + } else { + canonicalDesired.RequireApproval = rawDesired.RequireApproval + } + canonicalDesired.Gke = canonicalizeTargetGke(rawDesired.Gke, rawInitial.Gke, opts...) + canonicalDesired.AnthosCluster = canonicalizeTargetAnthosCluster(rawDesired.AnthosCluster, rawInitial.AnthosCluster, opts...) 
+ canonicalDesired.ExecutionConfigs = canonicalizeTargetExecutionConfigsSlice(rawDesired.ExecutionConfigs, rawInitial.ExecutionConfigs, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + canonicalDesired.Run = canonicalizeTargetRun(rawDesired.Run, rawInitial.Run, opts...) + canonicalDesired.MultiTarget = canonicalizeTargetMultiTarget(rawDesired.MultiTarget, rawInitial.MultiTarget, opts...) + if dcl.IsZeroValue(rawDesired.DeployParameters) || (dcl.IsEmptyValueIndirect(rawDesired.DeployParameters) && dcl.IsEmptyValueIndirect(rawInitial.DeployParameters)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.DeployParameters = rawInitial.DeployParameters + } else { + canonicalDesired.DeployParameters = rawDesired.DeployParameters + } + canonicalDesired.CustomTarget = canonicalizeTargetCustomTarget(rawDesired.CustomTarget, rawInitial.CustomTarget, opts...) + if dcl.IsZeroValue(rawDesired.AssociatedEntities) || (dcl.IsEmptyValueIndirect(rawDesired.AssociatedEntities) && dcl.IsEmptyValueIndirect(rawInitial.AssociatedEntities)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.AssociatedEntities = rawInitial.AssociatedEntities + } else { + canonicalDesired.AssociatedEntities = rawDesired.AssociatedEntities + } + + if canonicalDesired.Gke != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.AnthosCluster, rawDesired.Run, rawDesired.MultiTarget, rawDesired.CustomTarget) { + canonicalDesired.Gke = EmptyTargetGke + } + } + + if canonicalDesired.AnthosCluster != nil { + // Check if anything else is set. 
+ if dcl.AnySet(rawDesired.Gke, rawDesired.Run, rawDesired.MultiTarget, rawDesired.CustomTarget) { + canonicalDesired.AnthosCluster = EmptyTargetAnthosCluster + } + } + + if canonicalDesired.Run != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.Gke, rawDesired.AnthosCluster, rawDesired.MultiTarget, rawDesired.CustomTarget) { + canonicalDesired.Run = EmptyTargetRun + } + } + + if canonicalDesired.MultiTarget != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.Gke, rawDesired.AnthosCluster, rawDesired.Run, rawDesired.CustomTarget) { + canonicalDesired.MultiTarget = EmptyTargetMultiTarget + } + } + + if canonicalDesired.CustomTarget != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.Gke, rawDesired.AnthosCluster, rawDesired.Run, rawDesired.MultiTarget) { + canonicalDesired.CustomTarget = EmptyTargetCustomTarget + } + } + + return canonicalDesired, nil +} + +func canonicalizeTargetNewState(c *Client, rawNew, rawDesired *Target) (*Target, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.TargetId) && dcl.IsEmptyValueIndirect(rawDesired.TargetId) { + rawNew.TargetId = rawDesired.TargetId + } else { + if dcl.StringCanonicalize(rawDesired.TargetId, rawNew.TargetId) { + rawNew.TargetId = rawDesired.TargetId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else 
{ + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.RequireApproval) && dcl.IsEmptyValueIndirect(rawDesired.RequireApproval) { + rawNew.RequireApproval = rawDesired.RequireApproval + } else { + if dcl.BoolCanonicalize(rawDesired.RequireApproval, rawNew.RequireApproval) { + rawNew.RequireApproval = rawDesired.RequireApproval + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Gke) && dcl.IsEmptyValueIndirect(rawDesired.Gke) { + rawNew.Gke = rawDesired.Gke + } else { + rawNew.Gke = canonicalizeNewTargetGke(c, rawDesired.Gke, rawNew.Gke) + } + + if dcl.IsEmptyValueIndirect(rawNew.AnthosCluster) && dcl.IsEmptyValueIndirect(rawDesired.AnthosCluster) { + rawNew.AnthosCluster = rawDesired.AnthosCluster + } else { + rawNew.AnthosCluster = canonicalizeNewTargetAnthosCluster(c, rawDesired.AnthosCluster, rawNew.AnthosCluster) + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ExecutionConfigs) && dcl.IsEmptyValueIndirect(rawDesired.ExecutionConfigs) { + rawNew.ExecutionConfigs = rawDesired.ExecutionConfigs + } else { + rawNew.ExecutionConfigs = canonicalizeNewTargetExecutionConfigsSlice(c, rawDesired.ExecutionConfigs, rawNew.ExecutionConfigs) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Run) && 
dcl.IsEmptyValueIndirect(rawDesired.Run) { + rawNew.Run = rawDesired.Run + } else { + rawNew.Run = canonicalizeNewTargetRun(c, rawDesired.Run, rawNew.Run) + } + + if dcl.IsEmptyValueIndirect(rawNew.MultiTarget) && dcl.IsEmptyValueIndirect(rawDesired.MultiTarget) { + rawNew.MultiTarget = rawDesired.MultiTarget + } else { + rawNew.MultiTarget = canonicalizeNewTargetMultiTarget(c, rawDesired.MultiTarget, rawNew.MultiTarget) + } + + if dcl.IsEmptyValueIndirect(rawNew.DeployParameters) && dcl.IsEmptyValueIndirect(rawDesired.DeployParameters) { + rawNew.DeployParameters = rawDesired.DeployParameters + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CustomTarget) && dcl.IsEmptyValueIndirect(rawDesired.CustomTarget) { + rawNew.CustomTarget = rawDesired.CustomTarget + } else { + rawNew.CustomTarget = canonicalizeNewTargetCustomTarget(c, rawDesired.CustomTarget, rawNew.CustomTarget) + } + + if dcl.IsEmptyValueIndirect(rawNew.AssociatedEntities) && dcl.IsEmptyValueIndirect(rawDesired.AssociatedEntities) { + rawNew.AssociatedEntities = rawDesired.AssociatedEntities + } else { + } + + return rawNew, nil +} + +func canonicalizeTargetGke(des, initial *TargetGke, opts ...dcl.ApplyOption) *TargetGke { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetGke{} + + if dcl.IsZeroValue(des.Cluster) || (dcl.IsEmptyValueIndirect(des.Cluster) && dcl.IsEmptyValueIndirect(initial.Cluster)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Cluster = initial.Cluster + } else { + cDes.Cluster = des.Cluster + } + if dcl.BoolCanonicalize(des.InternalIP, initial.InternalIP) || dcl.IsZeroValue(des.InternalIP) { + cDes.InternalIP = initial.InternalIP + } else { + cDes.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, initial.ProxyUrl) || dcl.IsZeroValue(des.ProxyUrl) { + cDes.ProxyUrl = initial.ProxyUrl + } else { + cDes.ProxyUrl = des.ProxyUrl + } + if dcl.BoolCanonicalize(des.DnsEndpoint, initial.DnsEndpoint) || dcl.IsZeroValue(des.DnsEndpoint) { + cDes.DnsEndpoint = initial.DnsEndpoint + } else { + cDes.DnsEndpoint = des.DnsEndpoint + } + + return cDes +} + +func canonicalizeTargetGkeSlice(des, initial []TargetGke, opts ...dcl.ApplyOption) []TargetGke { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetGke, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetGke(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetGke, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetGke(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetGke(c *Client, des, nw *TargetGke) *TargetGke { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetGke while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.InternalIP, nw.InternalIP) { + nw.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, nw.ProxyUrl) { + nw.ProxyUrl = des.ProxyUrl + } + if dcl.BoolCanonicalize(des.DnsEndpoint, nw.DnsEndpoint) { + nw.DnsEndpoint = des.DnsEndpoint + } + + return nw +} + +func canonicalizeNewTargetGkeSet(c *Client, des, nw []TargetGke) []TargetGke { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetGke + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetGkeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetGke(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetGkeSlice(c *Client, des, nw []TargetGke) []TargetGke { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetGke + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetGke(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAnthosCluster(des, initial *TargetAnthosCluster, opts ...dcl.ApplyOption) *TargetAnthosCluster { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAnthosCluster{} + + if dcl.IsZeroValue(des.Membership) || (dcl.IsEmptyValueIndirect(des.Membership) && dcl.IsEmptyValueIndirect(initial.Membership)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Membership = initial.Membership + } else { + cDes.Membership = des.Membership + } + + return cDes +} + +func canonicalizeTargetAnthosClusterSlice(des, initial []TargetAnthosCluster, opts ...dcl.ApplyOption) []TargetAnthosCluster { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAnthosCluster, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAnthosCluster(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAnthosCluster, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAnthosCluster(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAnthosCluster(c *Client, des, nw *TargetAnthosCluster) *TargetAnthosCluster { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAnthosCluster while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewTargetAnthosClusterSet(c *Client, des, nw []TargetAnthosCluster) []TargetAnthosCluster { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetAnthosCluster + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAnthosClusterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAnthosCluster(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAnthosClusterSlice(c *Client, des, nw []TargetAnthosCluster) []TargetAnthosCluster { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetAnthosCluster + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAnthosCluster(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetExecutionConfigs(des, initial *TargetExecutionConfigs, opts ...dcl.ApplyOption) *TargetExecutionConfigs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetExecutionConfigs{} + + if dcl.IsZeroValue(des.Usages) || (dcl.IsEmptyValueIndirect(des.Usages) && dcl.IsEmptyValueIndirect(initial.Usages)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Usages = initial.Usages + } else { + cDes.Usages = des.Usages + } + if dcl.IsZeroValue(des.WorkerPool) || (dcl.IsEmptyValueIndirect(des.WorkerPool) && dcl.IsEmptyValueIndirect(initial.WorkerPool)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.WorkerPool = initial.WorkerPool + } else { + cDes.WorkerPool = des.WorkerPool + } + if dcl.StringCanonicalize(des.ServiceAccount, initial.ServiceAccount) || dcl.IsZeroValue(des.ServiceAccount) { + cDes.ServiceAccount = initial.ServiceAccount + } else { + cDes.ServiceAccount = des.ServiceAccount + } + if dcl.StringCanonicalize(des.ArtifactStorage, initial.ArtifactStorage) || dcl.IsZeroValue(des.ArtifactStorage) { + cDes.ArtifactStorage = initial.ArtifactStorage + } else { + cDes.ArtifactStorage = des.ArtifactStorage + } + if dcl.StringCanonicalize(des.ExecutionTimeout, initial.ExecutionTimeout) || dcl.IsZeroValue(des.ExecutionTimeout) { + cDes.ExecutionTimeout = initial.ExecutionTimeout + } else { + cDes.ExecutionTimeout = des.ExecutionTimeout + } + if dcl.BoolCanonicalize(des.Verbose, initial.Verbose) || dcl.IsZeroValue(des.Verbose) { + cDes.Verbose = initial.Verbose + } else { + cDes.Verbose = des.Verbose + } + + return cDes +} + +func canonicalizeTargetExecutionConfigsSlice(des, initial []TargetExecutionConfigs, opts ...dcl.ApplyOption) []TargetExecutionConfigs { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetExecutionConfigs, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetExecutionConfigs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetExecutionConfigs, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetExecutionConfigs(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetExecutionConfigs(c *Client, des, nw *TargetExecutionConfigs) *TargetExecutionConfigs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetExecutionConfigs while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ServiceAccount, nw.ServiceAccount) { + nw.ServiceAccount = des.ServiceAccount + } + if dcl.StringCanonicalize(des.ArtifactStorage, nw.ArtifactStorage) { + nw.ArtifactStorage = des.ArtifactStorage + } + if dcl.StringCanonicalize(des.ExecutionTimeout, nw.ExecutionTimeout) { + nw.ExecutionTimeout = des.ExecutionTimeout + } + if dcl.BoolCanonicalize(des.Verbose, nw.Verbose) { + nw.Verbose = des.Verbose + } + + return nw +} + +func canonicalizeNewTargetExecutionConfigsSet(c *Client, des, nw []TargetExecutionConfigs) []TargetExecutionConfigs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetExecutionConfigs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetExecutionConfigsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetExecutionConfigs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetExecutionConfigsSlice(c *Client, des, nw []TargetExecutionConfigs) []TargetExecutionConfigs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetExecutionConfigs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetExecutionConfigs(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetRun(des, initial *TargetRun, opts ...dcl.ApplyOption) *TargetRun { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetRun{} + + if dcl.StringCanonicalize(des.Location, initial.Location) || dcl.IsZeroValue(des.Location) { + cDes.Location = initial.Location + } else { + cDes.Location = des.Location + } + + return cDes +} + +func canonicalizeTargetRunSlice(des, initial []TargetRun, opts ...dcl.ApplyOption) []TargetRun { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetRun, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetRun(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetRun, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetRun(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetRun(c *Client, des, nw *TargetRun) *TargetRun { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetRun while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Location, nw.Location) { + nw.Location = des.Location + } + + return nw +} + +func canonicalizeNewTargetRunSet(c *Client, des, nw []TargetRun) []TargetRun { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []TargetRun + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetRunNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetRun(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetRunSlice(c *Client, des, nw []TargetRun) []TargetRun { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetRun + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetRun(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetMultiTarget(des, initial *TargetMultiTarget, opts ...dcl.ApplyOption) *TargetMultiTarget { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetMultiTarget{} + + if dcl.StringArrayCanonicalize(des.TargetIds, initial.TargetIds) { + cDes.TargetIds = initial.TargetIds + } else { + cDes.TargetIds = des.TargetIds + } + + return cDes +} + +func canonicalizeTargetMultiTargetSlice(des, initial []TargetMultiTarget, opts ...dcl.ApplyOption) []TargetMultiTarget { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetMultiTarget, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetMultiTarget(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetMultiTarget, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetMultiTarget(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetMultiTarget(c *Client, des, nw *TargetMultiTarget) *TargetMultiTarget { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetMultiTarget while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.TargetIds, nw.TargetIds) { + nw.TargetIds = des.TargetIds + } + + return nw +} + +func canonicalizeNewTargetMultiTargetSet(c *Client, des, nw []TargetMultiTarget) []TargetMultiTarget { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetMultiTarget + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetMultiTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetMultiTarget(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetMultiTargetSlice(c *Client, des, nw []TargetMultiTarget) []TargetMultiTarget { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetMultiTarget + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetMultiTarget(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetCustomTarget(des, initial *TargetCustomTarget, opts ...dcl.ApplyOption) *TargetCustomTarget { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetCustomTarget{} + + if dcl.IsZeroValue(des.CustomTargetType) || (dcl.IsEmptyValueIndirect(des.CustomTargetType) && dcl.IsEmptyValueIndirect(initial.CustomTargetType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.CustomTargetType = initial.CustomTargetType + } else { + cDes.CustomTargetType = des.CustomTargetType + } + + return cDes +} + +func canonicalizeTargetCustomTargetSlice(des, initial []TargetCustomTarget, opts ...dcl.ApplyOption) []TargetCustomTarget { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetCustomTarget, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetCustomTarget(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetCustomTarget, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetCustomTarget(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetCustomTarget(c *Client, des, nw *TargetCustomTarget) *TargetCustomTarget { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetCustomTarget while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewTargetCustomTargetSet(c *Client, des, nw []TargetCustomTarget) []TargetCustomTarget { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetCustomTarget + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetCustomTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetCustomTarget(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetCustomTargetSlice(c *Client, des, nw []TargetCustomTarget) []TargetCustomTarget { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetCustomTarget + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetCustomTarget(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAssociatedEntities(des, initial *TargetAssociatedEntities, opts ...dcl.ApplyOption) *TargetAssociatedEntities { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAssociatedEntities{} + + cDes.GkeClusters = canonicalizeTargetAssociatedEntitiesGkeClustersSlice(des.GkeClusters, initial.GkeClusters, opts...) + cDes.AnthosClusters = canonicalizeTargetAssociatedEntitiesAnthosClustersSlice(des.AnthosClusters, initial.AnthosClusters, opts...) 
+ + return cDes +} + +func canonicalizeTargetAssociatedEntitiesSlice(des, initial []TargetAssociatedEntities, opts ...dcl.ApplyOption) []TargetAssociatedEntities { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAssociatedEntities, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAssociatedEntities(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAssociatedEntities, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAssociatedEntities(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAssociatedEntities(c *Client, des, nw *TargetAssociatedEntities) *TargetAssociatedEntities { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAssociatedEntities while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.GkeClusters = canonicalizeNewTargetAssociatedEntitiesGkeClustersSlice(c, des.GkeClusters, nw.GkeClusters) + nw.AnthosClusters = canonicalizeNewTargetAssociatedEntitiesAnthosClustersSlice(c, des.AnthosClusters, nw.AnthosClusters) + + return nw +} + +func canonicalizeNewTargetAssociatedEntitiesSet(c *Client, des, nw []TargetAssociatedEntities) []TargetAssociatedEntities { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []TargetAssociatedEntities + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAssociatedEntitiesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAssociatedEntities(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAssociatedEntitiesSlice(c *Client, des, nw []TargetAssociatedEntities) []TargetAssociatedEntities { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetAssociatedEntities + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAssociatedEntities(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAssociatedEntitiesGkeClusters(des, initial *TargetAssociatedEntitiesGkeClusters, opts ...dcl.ApplyOption) *TargetAssociatedEntitiesGkeClusters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAssociatedEntitiesGkeClusters{} + + if dcl.IsZeroValue(des.Cluster) || (dcl.IsEmptyValueIndirect(des.Cluster) && dcl.IsEmptyValueIndirect(initial.Cluster)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Cluster = initial.Cluster + } else { + cDes.Cluster = des.Cluster + } + if dcl.BoolCanonicalize(des.InternalIP, initial.InternalIP) || dcl.IsZeroValue(des.InternalIP) { + cDes.InternalIP = initial.InternalIP + } else { + cDes.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, initial.ProxyUrl) || dcl.IsZeroValue(des.ProxyUrl) { + cDes.ProxyUrl = initial.ProxyUrl + } else { + cDes.ProxyUrl = des.ProxyUrl + } + + return cDes +} + +func canonicalizeTargetAssociatedEntitiesGkeClustersSlice(des, initial []TargetAssociatedEntitiesGkeClusters, opts ...dcl.ApplyOption) []TargetAssociatedEntitiesGkeClusters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAssociatedEntitiesGkeClusters(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAssociatedEntitiesGkeClusters(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAssociatedEntitiesGkeClusters(c *Client, des, nw *TargetAssociatedEntitiesGkeClusters) *TargetAssociatedEntitiesGkeClusters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAssociatedEntitiesGkeClusters while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.InternalIP, nw.InternalIP) { + nw.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, nw.ProxyUrl) { + nw.ProxyUrl = des.ProxyUrl + } + + return nw +} + +func canonicalizeNewTargetAssociatedEntitiesGkeClustersSet(c *Client, des, nw []TargetAssociatedEntitiesGkeClusters) []TargetAssociatedEntitiesGkeClusters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetAssociatedEntitiesGkeClusters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAssociatedEntitiesGkeClustersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAssociatedEntitiesGkeClusters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAssociatedEntitiesGkeClustersSlice(c *Client, des, nw []TargetAssociatedEntitiesGkeClusters) []TargetAssociatedEntitiesGkeClusters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetAssociatedEntitiesGkeClusters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAssociatedEntitiesGkeClusters(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAssociatedEntitiesAnthosClusters(des, initial *TargetAssociatedEntitiesAnthosClusters, opts ...dcl.ApplyOption) *TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAssociatedEntitiesAnthosClusters{} + + if dcl.IsZeroValue(des.Membership) || (dcl.IsEmptyValueIndirect(des.Membership) && dcl.IsEmptyValueIndirect(initial.Membership)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Membership = initial.Membership + } else { + cDes.Membership = des.Membership + } + + return cDes +} + +func canonicalizeTargetAssociatedEntitiesAnthosClustersSlice(des, initial []TargetAssociatedEntitiesAnthosClusters, opts ...dcl.ApplyOption) []TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAssociatedEntitiesAnthosClusters(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAssociatedEntitiesAnthosClusters(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAssociatedEntitiesAnthosClusters(c *Client, des, nw *TargetAssociatedEntitiesAnthosClusters) *TargetAssociatedEntitiesAnthosClusters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAssociatedEntitiesAnthosClusters while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewTargetAssociatedEntitiesAnthosClustersSet(c *Client, des, nw []TargetAssociatedEntitiesAnthosClusters) []TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetAssociatedEntitiesAnthosClusters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAssociatedEntitiesAnthosClustersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAssociatedEntitiesAnthosClusters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAssociatedEntitiesAnthosClustersSlice(c *Client, des, nw []TargetAssociatedEntitiesAnthosClusters) []TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetAssociatedEntitiesAnthosClusters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAssociatedEntitiesAnthosClusters(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffTarget(c *Client, desired, actual *Target, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.TargetId, actual.TargetId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TargetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.RequireApproval, actual.RequireApproval, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("RequireApproval")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Gke, actual.Gke, dcl.DiffInfo{ObjectFunction: compareTargetGkeNewStyle, EmptyObject: EmptyTargetGke, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Gke")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.AnthosCluster, actual.AnthosCluster, dcl.DiffInfo{ObjectFunction: compareTargetAnthosClusterNewStyle, EmptyObject: EmptyTargetAnthosCluster, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("AnthosCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExecutionConfigs, actual.ExecutionConfigs, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareTargetExecutionConfigsNewStyle, EmptyObject: EmptyTargetExecutionConfigs, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ExecutionConfigs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Run, actual.Run, dcl.DiffInfo{ObjectFunction: compareTargetRunNewStyle, EmptyObject: EmptyTargetRun, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Run")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MultiTarget, actual.MultiTarget, dcl.DiffInfo{ObjectFunction: compareTargetMultiTargetNewStyle, EmptyObject: EmptyTargetMultiTarget, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("MultiTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeployParameters, actual.DeployParameters, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("DeployParameters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CustomTarget, actual.CustomTarget, dcl.DiffInfo{ObjectFunction: compareTargetCustomTargetNewStyle, EmptyObject: EmptyTargetCustomTarget, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("CustomTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AssociatedEntities, actual.AssociatedEntities, dcl.DiffInfo{ObjectFunction: compareTargetAssociatedEntitiesNewStyle, EmptyObject: EmptyTargetAssociatedEntities, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("AssociatedEntities")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareTargetGkeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetGke) + if !ok { + desiredNotPointer, ok := d.(TargetGke) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetGke or *TargetGke", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetGke) + if !ok { + actualNotPointer, ok := a.(TargetGke) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetGke", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InternalIP, actual.InternalIP, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("InternalIp")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyUrl, actual.ProxyUrl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ProxyUrl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DnsEndpoint, actual.DnsEndpoint, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("DnsEndpoint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareTargetAnthosClusterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAnthosCluster) + if !ok { + desiredNotPointer, ok := d.(TargetAnthosCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAnthosCluster or *TargetAnthosCluster", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAnthosCluster) + if !ok { + actualNotPointer, ok := a.(TargetAnthosCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAnthosCluster", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetExecutionConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetExecutionConfigs) + if !ok { + desiredNotPointer, ok := d.(TargetExecutionConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetExecutionConfigs or *TargetExecutionConfigs", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetExecutionConfigs) + if !ok { + actualNotPointer, ok := a.(TargetExecutionConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetExecutionConfigs", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Usages, actual.Usages, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Usages")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkerPool, actual.WorkerPool, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("WorkerPool")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ArtifactStorage, actual.ArtifactStorage, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ArtifactStorage")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ExecutionTimeout, actual.ExecutionTimeout, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ExecutionTimeout")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Verbose, actual.Verbose, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Verbose")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareTargetRunNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetRun) + if !ok { + desiredNotPointer, ok := d.(TargetRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetRun or *TargetRun", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetRun) + if !ok { + actualNotPointer, ok := a.(TargetRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetRun", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetMultiTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetMultiTarget) + if !ok { + desiredNotPointer, ok := d.(TargetMultiTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetMultiTarget or *TargetMultiTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetMultiTarget) + if !ok { + actualNotPointer, ok := a.(TargetMultiTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetMultiTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetIds, actual.TargetIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("TargetIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareTargetCustomTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetCustomTarget) + if !ok { + desiredNotPointer, ok := d.(TargetCustomTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetCustomTarget or *TargetCustomTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetCustomTarget) + if !ok { + actualNotPointer, ok := a.(TargetCustomTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetCustomTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.CustomTargetType, actual.CustomTargetType, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("CustomTargetType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetAssociatedEntitiesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAssociatedEntities) + if !ok { + desiredNotPointer, ok := d.(TargetAssociatedEntities) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntities or *TargetAssociatedEntities", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAssociatedEntities) + if !ok { + actualNotPointer, ok := a.(TargetAssociatedEntities) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntities", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GkeClusters, actual.GkeClusters, dcl.DiffInfo{ObjectFunction: compareTargetAssociatedEntitiesGkeClustersNewStyle, EmptyObject: EmptyTargetAssociatedEntitiesGkeClusters, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("GkeClusters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AnthosClusters, actual.AnthosClusters, dcl.DiffInfo{ObjectFunction: compareTargetAssociatedEntitiesAnthosClustersNewStyle, EmptyObject: EmptyTargetAssociatedEntitiesAnthosClusters, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("AnthosClusters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetAssociatedEntitiesGkeClustersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAssociatedEntitiesGkeClusters) + if !ok { + desiredNotPointer, ok := d.(TargetAssociatedEntitiesGkeClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesGkeClusters or *TargetAssociatedEntitiesGkeClusters", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAssociatedEntitiesGkeClusters) + if !ok { + actualNotPointer, ok := a.(TargetAssociatedEntitiesGkeClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesGkeClusters", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InternalIP, actual.InternalIP, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("InternalIp")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyUrl, actual.ProxyUrl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ProxyUrl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetAssociatedEntitiesAnthosClustersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAssociatedEntitiesAnthosClusters) + if !ok { + desiredNotPointer, ok := d.(TargetAssociatedEntitiesAnthosClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesAnthosClusters or *TargetAssociatedEntitiesAnthosClusters", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAssociatedEntitiesAnthosClusters) + if !ok { + actualNotPointer, ok := a.(TargetAssociatedEntitiesAnthosClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesAnthosClusters", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Target) urlNormalized() *Target { + normalized := dcl.Copy(*r).(Target) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.TargetId = dcl.SelfLinkToName(r.TargetId) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Target) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateTarget" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/targets/{{name}}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Target resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Target) marshal(c *Client) ([]byte, error) { + m, err := expandTarget(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Target: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalTarget decodes JSON responses into the Target resource schema. +func unmarshalTarget(b []byte, c *Client, res *Target) (*Target, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapTarget(m, c, res) +} + +func unmarshalMapTarget(m map[string]interface{}, c *Client, res *Target) (*Target, error) { + + flattened := flattenTarget(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandTarget expands Target into a JSON request object. 
+func expandTarget(c *Client, f *Target) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.RequireApproval; dcl.ValueShouldBeSent(v) { + m["requireApproval"] = v + } + if v, err := expandTargetGke(c, f.Gke, res); err != nil { + return nil, fmt.Errorf("error expanding Gke into gke: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gke"] = v + } + if v, err := expandTargetAnthosCluster(c, f.AnthosCluster, res); err != nil { + return nil, fmt.Errorf("error expanding AnthosCluster into anthosCluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["anthosCluster"] = v + } + if v, err := expandTargetExecutionConfigsSlice(c, f.ExecutionConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding ExecutionConfigs into executionConfigs: %w", err) + } else if v != nil { + m["executionConfigs"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := expandTargetRun(c, f.Run, res); err != nil { + return nil, fmt.Errorf("error expanding Run into run: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["run"] = v + } + if v, err := expandTargetMultiTarget(c, f.MultiTarget, res); err != nil { + return nil, fmt.Errorf("error expanding MultiTarget into multiTarget: 
%w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["multiTarget"] = v + } + if v := f.DeployParameters; dcl.ValueShouldBeSent(v) { + m["deployParameters"] = v + } + if v, err := expandTargetCustomTarget(c, f.CustomTarget, res); err != nil { + return nil, fmt.Errorf("error expanding CustomTarget into customTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["customTarget"] = v + } + if v, err := expandTargetAssociatedEntitiesMap(c, f.AssociatedEntities, res); err != nil { + return nil, fmt.Errorf("error expanding AssociatedEntities into associatedEntities: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["associatedEntities"] = v + } + + return m, nil +} + +// flattenTarget flattens Target from a JSON request object into the +// Target type. +func flattenTarget(c *Client, i interface{}, res *Target) *Target { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Target{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.TargetId = dcl.FlattenString(m["targetId"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.RequireApproval = dcl.FlattenBool(m["requireApproval"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Gke = flattenTargetGke(c, m["gke"], res) + resultRes.AnthosCluster = flattenTargetAnthosCluster(c, m["anthosCluster"], res) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.ExecutionConfigs = flattenTargetExecutionConfigsSlice(c, m["executionConfigs"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Run = flattenTargetRun(c, m["run"], res) + resultRes.MultiTarget = 
flattenTargetMultiTarget(c, m["multiTarget"], res) + resultRes.DeployParameters = dcl.FlattenKeyValuePairs(m["deployParameters"]) + resultRes.CustomTarget = flattenTargetCustomTarget(c, m["customTarget"], res) + resultRes.AssociatedEntities = flattenTargetAssociatedEntitiesMap(c, m["associatedEntities"], res) + + return resultRes +} + +// expandTargetGkeMap expands the contents of TargetGke into a JSON +// request object. +func expandTargetGkeMap(c *Client, f map[string]TargetGke, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetGke(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetGkeSlice expands the contents of TargetGke into a JSON +// request object. +func expandTargetGkeSlice(c *Client, f []TargetGke, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetGke(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetGkeMap flattens the contents of TargetGke from a JSON +// response object. +func flattenTargetGkeMap(c *Client, i interface{}, res *Target) map[string]TargetGke { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetGke{} + } + + if len(a) == 0 { + return map[string]TargetGke{} + } + + items := make(map[string]TargetGke) + for k, item := range a { + items[k] = *flattenTargetGke(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetGkeSlice flattens the contents of TargetGke from a JSON +// response object. 
+func flattenTargetGkeSlice(c *Client, i interface{}, res *Target) []TargetGke { + a, ok := i.([]interface{}) + if !ok { + return []TargetGke{} + } + + if len(a) == 0 { + return []TargetGke{} + } + + items := make([]TargetGke, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetGke(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetGke expands an instance of TargetGke into a JSON +// request object. +func expandTargetGke(c *Client, f *TargetGke, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Cluster; !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) { + m["internalIp"] = v + } + if v := f.ProxyUrl; !dcl.IsEmptyValueIndirect(v) { + m["proxyUrl"] = v + } + if v := f.DnsEndpoint; !dcl.IsEmptyValueIndirect(v) { + m["dnsEndpoint"] = v + } + + return m, nil +} + +// flattenTargetGke flattens an instance of TargetGke from a JSON +// response object. +func flattenTargetGke(c *Client, i interface{}, res *Target) *TargetGke { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetGke{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetGke + } + r.Cluster = dcl.FlattenString(m["cluster"]) + r.InternalIP = dcl.FlattenBool(m["internalIp"]) + r.ProxyUrl = dcl.FlattenString(m["proxyUrl"]) + r.DnsEndpoint = dcl.FlattenBool(m["dnsEndpoint"]) + + return r +} + +// expandTargetAnthosClusterMap expands the contents of TargetAnthosCluster into a JSON +// request object. 
+func expandTargetAnthosClusterMap(c *Client, f map[string]TargetAnthosCluster, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAnthosCluster(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAnthosClusterSlice expands the contents of TargetAnthosCluster into a JSON +// request object. +func expandTargetAnthosClusterSlice(c *Client, f []TargetAnthosCluster, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAnthosCluster(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAnthosClusterMap flattens the contents of TargetAnthosCluster from a JSON +// response object. +func flattenTargetAnthosClusterMap(c *Client, i interface{}, res *Target) map[string]TargetAnthosCluster { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAnthosCluster{} + } + + if len(a) == 0 { + return map[string]TargetAnthosCluster{} + } + + items := make(map[string]TargetAnthosCluster) + for k, item := range a { + items[k] = *flattenTargetAnthosCluster(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAnthosClusterSlice flattens the contents of TargetAnthosCluster from a JSON +// response object. 
+func flattenTargetAnthosClusterSlice(c *Client, i interface{}, res *Target) []TargetAnthosCluster { + a, ok := i.([]interface{}) + if !ok { + return []TargetAnthosCluster{} + } + + if len(a) == 0 { + return []TargetAnthosCluster{} + } + + items := make([]TargetAnthosCluster, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAnthosCluster(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAnthosCluster expands an instance of TargetAnthosCluster into a JSON +// request object. +func expandTargetAnthosCluster(c *Client, f *TargetAnthosCluster, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Membership; !dcl.IsEmptyValueIndirect(v) { + m["membership"] = v + } + + return m, nil +} + +// flattenTargetAnthosCluster flattens an instance of TargetAnthosCluster from a JSON +// response object. +func flattenTargetAnthosCluster(c *Client, i interface{}, res *Target) *TargetAnthosCluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetAnthosCluster{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetAnthosCluster + } + r.Membership = dcl.FlattenString(m["membership"]) + + return r +} + +// expandTargetExecutionConfigsMap expands the contents of TargetExecutionConfigs into a JSON +// request object. +func expandTargetExecutionConfigsMap(c *Client, f map[string]TargetExecutionConfigs, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetExecutionConfigs(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetExecutionConfigsSlice expands the contents of TargetExecutionConfigs into a JSON +// request object. 
+func expandTargetExecutionConfigsSlice(c *Client, f []TargetExecutionConfigs, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetExecutionConfigs(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetExecutionConfigsMap flattens the contents of TargetExecutionConfigs from a JSON +// response object. +func flattenTargetExecutionConfigsMap(c *Client, i interface{}, res *Target) map[string]TargetExecutionConfigs { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetExecutionConfigs{} + } + + if len(a) == 0 { + return map[string]TargetExecutionConfigs{} + } + + items := make(map[string]TargetExecutionConfigs) + for k, item := range a { + items[k] = *flattenTargetExecutionConfigs(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetExecutionConfigsSlice flattens the contents of TargetExecutionConfigs from a JSON +// response object. +func flattenTargetExecutionConfigsSlice(c *Client, i interface{}, res *Target) []TargetExecutionConfigs { + a, ok := i.([]interface{}) + if !ok { + return []TargetExecutionConfigs{} + } + + if len(a) == 0 { + return []TargetExecutionConfigs{} + } + + items := make([]TargetExecutionConfigs, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetExecutionConfigs(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetExecutionConfigs expands an instance of TargetExecutionConfigs into a JSON +// request object. 
+func expandTargetExecutionConfigs(c *Client, f *TargetExecutionConfigs, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Usages; v != nil { + m["usages"] = v + } + if v := f.WorkerPool; !dcl.IsEmptyValueIndirect(v) { + m["workerPool"] = v + } + if v := f.ServiceAccount; !dcl.IsEmptyValueIndirect(v) { + m["serviceAccount"] = v + } + if v := f.ArtifactStorage; !dcl.IsEmptyValueIndirect(v) { + m["artifactStorage"] = v + } + if v := f.ExecutionTimeout; !dcl.IsEmptyValueIndirect(v) { + m["executionTimeout"] = v + } + if v := f.Verbose; !dcl.IsEmptyValueIndirect(v) { + m["verbose"] = v + } + + return m, nil +} + +// flattenTargetExecutionConfigs flattens an instance of TargetExecutionConfigs from a JSON +// response object. +func flattenTargetExecutionConfigs(c *Client, i interface{}, res *Target) *TargetExecutionConfigs { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetExecutionConfigs{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetExecutionConfigs + } + r.Usages = flattenTargetExecutionConfigsUsagesEnumSlice(c, m["usages"], res) + r.WorkerPool = dcl.FlattenString(m["workerPool"]) + r.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) + r.ArtifactStorage = dcl.FlattenString(m["artifactStorage"]) + r.ExecutionTimeout = dcl.FlattenString(m["executionTimeout"]) + r.Verbose = dcl.FlattenBool(m["verbose"]) + + return r +} + +// expandTargetRunMap expands the contents of TargetRun into a JSON +// request object. 
+func expandTargetRunMap(c *Client, f map[string]TargetRun, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetRun(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetRunSlice expands the contents of TargetRun into a JSON +// request object. +func expandTargetRunSlice(c *Client, f []TargetRun, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetRun(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetRunMap flattens the contents of TargetRun from a JSON +// response object. +func flattenTargetRunMap(c *Client, i interface{}, res *Target) map[string]TargetRun { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetRun{} + } + + if len(a) == 0 { + return map[string]TargetRun{} + } + + items := make(map[string]TargetRun) + for k, item := range a { + items[k] = *flattenTargetRun(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetRunSlice flattens the contents of TargetRun from a JSON +// response object. +func flattenTargetRunSlice(c *Client, i interface{}, res *Target) []TargetRun { + a, ok := i.([]interface{}) + if !ok { + return []TargetRun{} + } + + if len(a) == 0 { + return []TargetRun{} + } + + items := make([]TargetRun, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetRun(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetRun expands an instance of TargetRun into a JSON +// request object. 
+func expandTargetRun(c *Client, f *TargetRun, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Location; !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenTargetRun flattens an instance of TargetRun from a JSON +// response object. +func flattenTargetRun(c *Client, i interface{}, res *Target) *TargetRun { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetRun{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetRun + } + r.Location = dcl.FlattenString(m["location"]) + + return r +} + +// expandTargetMultiTargetMap expands the contents of TargetMultiTarget into a JSON +// request object. +func expandTargetMultiTargetMap(c *Client, f map[string]TargetMultiTarget, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetMultiTarget(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetMultiTargetSlice expands the contents of TargetMultiTarget into a JSON +// request object. +func expandTargetMultiTargetSlice(c *Client, f []TargetMultiTarget, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetMultiTarget(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetMultiTargetMap flattens the contents of TargetMultiTarget from a JSON +// response object. 
+func flattenTargetMultiTargetMap(c *Client, i interface{}, res *Target) map[string]TargetMultiTarget { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetMultiTarget{} + } + + if len(a) == 0 { + return map[string]TargetMultiTarget{} + } + + items := make(map[string]TargetMultiTarget) + for k, item := range a { + items[k] = *flattenTargetMultiTarget(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetMultiTargetSlice flattens the contents of TargetMultiTarget from a JSON +// response object. +func flattenTargetMultiTargetSlice(c *Client, i interface{}, res *Target) []TargetMultiTarget { + a, ok := i.([]interface{}) + if !ok { + return []TargetMultiTarget{} + } + + if len(a) == 0 { + return []TargetMultiTarget{} + } + + items := make([]TargetMultiTarget, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetMultiTarget(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetMultiTarget expands an instance of TargetMultiTarget into a JSON +// request object. +func expandTargetMultiTarget(c *Client, f *TargetMultiTarget, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TargetIds; v != nil { + m["targetIds"] = v + } + + return m, nil +} + +// flattenTargetMultiTarget flattens an instance of TargetMultiTarget from a JSON +// response object. +func flattenTargetMultiTarget(c *Client, i interface{}, res *Target) *TargetMultiTarget { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetMultiTarget{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetMultiTarget + } + r.TargetIds = dcl.FlattenStringSlice(m["targetIds"]) + + return r +} + +// expandTargetCustomTargetMap expands the contents of TargetCustomTarget into a JSON +// request object. 
+func expandTargetCustomTargetMap(c *Client, f map[string]TargetCustomTarget, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetCustomTarget(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetCustomTargetSlice expands the contents of TargetCustomTarget into a JSON +// request object. +func expandTargetCustomTargetSlice(c *Client, f []TargetCustomTarget, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetCustomTarget(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetCustomTargetMap flattens the contents of TargetCustomTarget from a JSON +// response object. +func flattenTargetCustomTargetMap(c *Client, i interface{}, res *Target) map[string]TargetCustomTarget { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetCustomTarget{} + } + + if len(a) == 0 { + return map[string]TargetCustomTarget{} + } + + items := make(map[string]TargetCustomTarget) + for k, item := range a { + items[k] = *flattenTargetCustomTarget(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetCustomTargetSlice flattens the contents of TargetCustomTarget from a JSON +// response object. 
+func flattenTargetCustomTargetSlice(c *Client, i interface{}, res *Target) []TargetCustomTarget { + a, ok := i.([]interface{}) + if !ok { + return []TargetCustomTarget{} + } + + if len(a) == 0 { + return []TargetCustomTarget{} + } + + items := make([]TargetCustomTarget, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetCustomTarget(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetCustomTarget expands an instance of TargetCustomTarget into a JSON +// request object. +func expandTargetCustomTarget(c *Client, f *TargetCustomTarget, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.CustomTargetType; !dcl.IsEmptyValueIndirect(v) { + m["customTargetType"] = v + } + + return m, nil +} + +// flattenTargetCustomTarget flattens an instance of TargetCustomTarget from a JSON +// response object. +func flattenTargetCustomTarget(c *Client, i interface{}, res *Target) *TargetCustomTarget { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetCustomTarget{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetCustomTarget + } + r.CustomTargetType = dcl.FlattenString(m["customTargetType"]) + + return r +} + +// expandTargetAssociatedEntitiesMap expands the contents of TargetAssociatedEntities into a JSON +// request object. +func expandTargetAssociatedEntitiesMap(c *Client, f map[string]TargetAssociatedEntities, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAssociatedEntities(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAssociatedEntitiesSlice expands the contents of TargetAssociatedEntities into a JSON +// request object. 
+func expandTargetAssociatedEntitiesSlice(c *Client, f []TargetAssociatedEntities, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAssociatedEntities(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAssociatedEntitiesMap flattens the contents of TargetAssociatedEntities from a JSON +// response object. +func flattenTargetAssociatedEntitiesMap(c *Client, i interface{}, res *Target) map[string]TargetAssociatedEntities { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAssociatedEntities{} + } + + if len(a) == 0 { + return map[string]TargetAssociatedEntities{} + } + + items := make(map[string]TargetAssociatedEntities) + for k, item := range a { + items[k] = *flattenTargetAssociatedEntities(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAssociatedEntitiesSlice flattens the contents of TargetAssociatedEntities from a JSON +// response object. +func flattenTargetAssociatedEntitiesSlice(c *Client, i interface{}, res *Target) []TargetAssociatedEntities { + a, ok := i.([]interface{}) + if !ok { + return []TargetAssociatedEntities{} + } + + if len(a) == 0 { + return []TargetAssociatedEntities{} + } + + items := make([]TargetAssociatedEntities, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAssociatedEntities(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAssociatedEntities expands an instance of TargetAssociatedEntities into a JSON +// request object. 
+func expandTargetAssociatedEntities(c *Client, f *TargetAssociatedEntities, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandTargetAssociatedEntitiesGkeClustersSlice(c, f.GkeClusters, res); err != nil { + return nil, fmt.Errorf("error expanding GkeClusters into gkeClusters: %w", err) + } else if v != nil { + m["gkeClusters"] = v + } + if v, err := expandTargetAssociatedEntitiesAnthosClustersSlice(c, f.AnthosClusters, res); err != nil { + return nil, fmt.Errorf("error expanding AnthosClusters into anthosClusters: %w", err) + } else if v != nil { + m["anthosClusters"] = v + } + + return m, nil +} + +// flattenTargetAssociatedEntities flattens an instance of TargetAssociatedEntities from a JSON +// response object. +func flattenTargetAssociatedEntities(c *Client, i interface{}, res *Target) *TargetAssociatedEntities { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetAssociatedEntities{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetAssociatedEntities + } + r.GkeClusters = flattenTargetAssociatedEntitiesGkeClustersSlice(c, m["gkeClusters"], res) + r.AnthosClusters = flattenTargetAssociatedEntitiesAnthosClustersSlice(c, m["anthosClusters"], res) + + return r +} + +// expandTargetAssociatedEntitiesGkeClustersMap expands the contents of TargetAssociatedEntitiesGkeClusters into a JSON +// request object. 
+func expandTargetAssociatedEntitiesGkeClustersMap(c *Client, f map[string]TargetAssociatedEntitiesGkeClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAssociatedEntitiesGkeClusters(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAssociatedEntitiesGkeClustersSlice expands the contents of TargetAssociatedEntitiesGkeClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesGkeClustersSlice(c *Client, f []TargetAssociatedEntitiesGkeClusters, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAssociatedEntitiesGkeClusters(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAssociatedEntitiesGkeClustersMap flattens the contents of TargetAssociatedEntitiesGkeClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesGkeClustersMap(c *Client, i interface{}, res *Target) map[string]TargetAssociatedEntitiesGkeClusters { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAssociatedEntitiesGkeClusters{} + } + + if len(a) == 0 { + return map[string]TargetAssociatedEntitiesGkeClusters{} + } + + items := make(map[string]TargetAssociatedEntitiesGkeClusters) + for k, item := range a { + items[k] = *flattenTargetAssociatedEntitiesGkeClusters(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAssociatedEntitiesGkeClustersSlice flattens the contents of TargetAssociatedEntitiesGkeClusters from a JSON +// response object. 
+func flattenTargetAssociatedEntitiesGkeClustersSlice(c *Client, i interface{}, res *Target) []TargetAssociatedEntitiesGkeClusters { + a, ok := i.([]interface{}) + if !ok { + return []TargetAssociatedEntitiesGkeClusters{} + } + + if len(a) == 0 { + return []TargetAssociatedEntitiesGkeClusters{} + } + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAssociatedEntitiesGkeClusters(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAssociatedEntitiesGkeClusters expands an instance of TargetAssociatedEntitiesGkeClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesGkeClusters(c *Client, f *TargetAssociatedEntitiesGkeClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Cluster; !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) { + m["internalIp"] = v + } + if v := f.ProxyUrl; !dcl.IsEmptyValueIndirect(v) { + m["proxyUrl"] = v + } + + return m, nil +} + +// flattenTargetAssociatedEntitiesGkeClusters flattens an instance of TargetAssociatedEntitiesGkeClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesGkeClusters(c *Client, i interface{}, res *Target) *TargetAssociatedEntitiesGkeClusters { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetAssociatedEntitiesGkeClusters{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetAssociatedEntitiesGkeClusters + } + r.Cluster = dcl.FlattenString(m["cluster"]) + r.InternalIP = dcl.FlattenBool(m["internalIp"]) + r.ProxyUrl = dcl.FlattenString(m["proxyUrl"]) + + return r +} + +// expandTargetAssociatedEntitiesAnthosClustersMap expands the contents of TargetAssociatedEntitiesAnthosClusters into a JSON +// request object. 
+func expandTargetAssociatedEntitiesAnthosClustersMap(c *Client, f map[string]TargetAssociatedEntitiesAnthosClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAssociatedEntitiesAnthosClusters(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAssociatedEntitiesAnthosClustersSlice expands the contents of TargetAssociatedEntitiesAnthosClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesAnthosClustersSlice(c *Client, f []TargetAssociatedEntitiesAnthosClusters, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAssociatedEntitiesAnthosClusters(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAssociatedEntitiesAnthosClustersMap flattens the contents of TargetAssociatedEntitiesAnthosClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesAnthosClustersMap(c *Client, i interface{}, res *Target) map[string]TargetAssociatedEntitiesAnthosClusters { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAssociatedEntitiesAnthosClusters{} + } + + if len(a) == 0 { + return map[string]TargetAssociatedEntitiesAnthosClusters{} + } + + items := make(map[string]TargetAssociatedEntitiesAnthosClusters) + for k, item := range a { + items[k] = *flattenTargetAssociatedEntitiesAnthosClusters(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAssociatedEntitiesAnthosClustersSlice flattens the contents of TargetAssociatedEntitiesAnthosClusters from a JSON +// response object. 
+func flattenTargetAssociatedEntitiesAnthosClustersSlice(c *Client, i interface{}, res *Target) []TargetAssociatedEntitiesAnthosClusters { + a, ok := i.([]interface{}) + if !ok { + return []TargetAssociatedEntitiesAnthosClusters{} + } + + if len(a) == 0 { + return []TargetAssociatedEntitiesAnthosClusters{} + } + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAssociatedEntitiesAnthosClusters(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAssociatedEntitiesAnthosClusters expands an instance of TargetAssociatedEntitiesAnthosClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesAnthosClusters(c *Client, f *TargetAssociatedEntitiesAnthosClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Membership; !dcl.IsEmptyValueIndirect(v) { + m["membership"] = v + } + + return m, nil +} + +// flattenTargetAssociatedEntitiesAnthosClusters flattens an instance of TargetAssociatedEntitiesAnthosClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesAnthosClusters(c *Client, i interface{}, res *Target) *TargetAssociatedEntitiesAnthosClusters { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetAssociatedEntitiesAnthosClusters{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetAssociatedEntitiesAnthosClusters + } + r.Membership = dcl.FlattenString(m["membership"]) + + return r +} + +// flattenTargetExecutionConfigsUsagesEnumMap flattens the contents of TargetExecutionConfigsUsagesEnum from a JSON +// response object. 
+func flattenTargetExecutionConfigsUsagesEnumMap(c *Client, i interface{}, res *Target) map[string]TargetExecutionConfigsUsagesEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetExecutionConfigsUsagesEnum{} + } + + if len(a) == 0 { + return map[string]TargetExecutionConfigsUsagesEnum{} + } + + items := make(map[string]TargetExecutionConfigsUsagesEnum) + for k, item := range a { + items[k] = *flattenTargetExecutionConfigsUsagesEnum(item.(interface{})) + } + + return items +} + +// flattenTargetExecutionConfigsUsagesEnumSlice flattens the contents of TargetExecutionConfigsUsagesEnum from a JSON +// response object. +func flattenTargetExecutionConfigsUsagesEnumSlice(c *Client, i interface{}, res *Target) []TargetExecutionConfigsUsagesEnum { + a, ok := i.([]interface{}) + if !ok { + return []TargetExecutionConfigsUsagesEnum{} + } + + if len(a) == 0 { + return []TargetExecutionConfigsUsagesEnum{} + } + + items := make([]TargetExecutionConfigsUsagesEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetExecutionConfigsUsagesEnum(item.(interface{}))) + } + + return items +} + +// flattenTargetExecutionConfigsUsagesEnum asserts that an interface is a string, and returns a +// pointer to a *TargetExecutionConfigsUsagesEnum with the same value as that string. +func flattenTargetExecutionConfigsUsagesEnum(i interface{}) *TargetExecutionConfigsUsagesEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return TargetExecutionConfigsUsagesEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Target) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalTarget(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type targetDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp targetApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToTargetDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]targetDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []targetDiff + // For each operation name, create a targetDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := targetDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToTargetApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToTargetApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (targetApiOperation, error) { + switch opName { + + case "updateTargetUpdateTargetOperation": + return &updateTargetUpdateTargetOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractTargetFields(r *Target) error { + vGke := r.Gke + if vGke == nil { + // note: explicitly not the empty object. + vGke = &TargetGke{} + } + if err := extractTargetGkeFields(r, vGke); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGke) { + r.Gke = vGke + } + vAnthosCluster := r.AnthosCluster + if vAnthosCluster == nil { + // note: explicitly not the empty object. + vAnthosCluster = &TargetAnthosCluster{} + } + if err := extractTargetAnthosClusterFields(r, vAnthosCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAnthosCluster) { + r.AnthosCluster = vAnthosCluster + } + vRun := r.Run + if vRun == nil { + // note: explicitly not the empty object. 
+ vRun = &TargetRun{} + } + if err := extractTargetRunFields(r, vRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRun) { + r.Run = vRun + } + vMultiTarget := r.MultiTarget + if vMultiTarget == nil { + // note: explicitly not the empty object. + vMultiTarget = &TargetMultiTarget{} + } + if err := extractTargetMultiTargetFields(r, vMultiTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMultiTarget) { + r.MultiTarget = vMultiTarget + } + vCustomTarget := r.CustomTarget + if vCustomTarget == nil { + // note: explicitly not the empty object. + vCustomTarget = &TargetCustomTarget{} + } + if err := extractTargetCustomTargetFields(r, vCustomTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomTarget) { + r.CustomTarget = vCustomTarget + } + return nil +} +func extractTargetGkeFields(r *Target, o *TargetGke) error { + return nil +} +func extractTargetAnthosClusterFields(r *Target, o *TargetAnthosCluster) error { + return nil +} +func extractTargetExecutionConfigsFields(r *Target, o *TargetExecutionConfigs) error { + return nil +} +func extractTargetRunFields(r *Target, o *TargetRun) error { + return nil +} +func extractTargetMultiTargetFields(r *Target, o *TargetMultiTarget) error { + return nil +} +func extractTargetCustomTargetFields(r *Target, o *TargetCustomTarget) error { + return nil +} +func extractTargetAssociatedEntitiesFields(r *Target, o *TargetAssociatedEntities) error { + return nil +} +func extractTargetAssociatedEntitiesGkeClustersFields(r *Target, o *TargetAssociatedEntitiesGkeClusters) error { + return nil +} +func extractTargetAssociatedEntitiesAnthosClustersFields(r *Target, o *TargetAssociatedEntitiesAnthosClusters) error { + return nil +} + +func postReadExtractTargetFields(r *Target) error { + vGke := r.Gke + if vGke == nil { + // note: explicitly not the empty object. 
+ vGke = &TargetGke{} + } + if err := postReadExtractTargetGkeFields(r, vGke); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGke) { + r.Gke = vGke + } + vAnthosCluster := r.AnthosCluster + if vAnthosCluster == nil { + // note: explicitly not the empty object. + vAnthosCluster = &TargetAnthosCluster{} + } + if err := postReadExtractTargetAnthosClusterFields(r, vAnthosCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAnthosCluster) { + r.AnthosCluster = vAnthosCluster + } + vRun := r.Run + if vRun == nil { + // note: explicitly not the empty object. + vRun = &TargetRun{} + } + if err := postReadExtractTargetRunFields(r, vRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRun) { + r.Run = vRun + } + vMultiTarget := r.MultiTarget + if vMultiTarget == nil { + // note: explicitly not the empty object. + vMultiTarget = &TargetMultiTarget{} + } + if err := postReadExtractTargetMultiTargetFields(r, vMultiTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMultiTarget) { + r.MultiTarget = vMultiTarget + } + vCustomTarget := r.CustomTarget + if vCustomTarget == nil { + // note: explicitly not the empty object. 
+		vCustomTarget = &TargetCustomTarget{}
+	}
+	if err := postReadExtractTargetCustomTargetFields(r, vCustomTarget); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCustomTarget) {
+		r.CustomTarget = vCustomTarget
+	}
+	return nil
+}
+func postReadExtractTargetGkeFields(r *Target, o *TargetGke) error {
+	return nil
+}
+func postReadExtractTargetAnthosClusterFields(r *Target, o *TargetAnthosCluster) error {
+	return nil
+}
+func postReadExtractTargetExecutionConfigsFields(r *Target, o *TargetExecutionConfigs) error {
+	return nil
+}
+func postReadExtractTargetRunFields(r *Target, o *TargetRun) error {
+	return nil
+}
+func postReadExtractTargetMultiTargetFields(r *Target, o *TargetMultiTarget) error {
+	return nil
+}
+func postReadExtractTargetCustomTargetFields(r *Target, o *TargetCustomTarget) error {
+	return nil
+}
+func postReadExtractTargetAssociatedEntitiesFields(r *Target, o *TargetAssociatedEntities) error {
+	return nil
+}
+func postReadExtractTargetAssociatedEntitiesGkeClustersFields(r *Target, o *TargetAssociatedEntitiesGkeClusters) error {
+	return nil
+}
+func postReadExtractTargetAssociatedEntitiesAnthosClustersFields(r *Target, o *TargetAssociatedEntitiesAnthosClusters) error {
+	return nil
+}
diff --git a/mmv1/third_party/terraform/services/containeraws/client.go b/mmv1/third_party/terraform/services/containeraws/client.go
new file mode 100644
index 000000000000..e452af5f3c03
--- /dev/null
+++ b/mmv1/third_party/terraform/services/containeraws/client.go
@@ -0,0 +1,18 @@
+package containeraws
+
+import (
+	dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource"
+)
+
+// The Client is the base struct of all operations. This will receive the
+// Get, Delete, List, and Apply operations on all resources.
+type Client struct {
+	Config *dcl.Config
+}
+
+// NewClient creates a Client that performs all operations using the supplied dcl.Config.
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl b/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl new file mode 100644 index 000000000000..5960eaffb6f0 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl @@ -0,0 +1,1582 @@ +package containeraws + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Cluster struct { + Name *string `json:"name"` + Description *string `json:"description"` + Networking *ClusterNetworking `json:"networking"` + AwsRegion *string `json:"awsRegion"` + ControlPlane *ClusterControlPlane `json:"controlPlane"` + Authorization *ClusterAuthorization `json:"authorization"` + State *ClusterStateEnum `json:"state"` + Endpoint *string `json:"endpoint"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + WorkloadIdentityConfig *ClusterWorkloadIdentityConfig `json:"workloadIdentityConfig"` + Project *string `json:"project"` + Location *string `json:"location"` + Fleet *ClusterFleet `json:"fleet"` +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig *ClusterLoggingConfig `json:"loggingConfig"` + MonitoringConfig *ClusterMonitoringConfig `json:"monitoringConfig"` +{{- end }} + BinaryAuthorization *ClusterBinaryAuthorization `json:"binaryAuthorization"` +} + +func (r *Cluster) String() string { + return dcl.SprintResource(r) +} + +// The enum ClusterControlPlaneRootVolumeVolumeTypeEnum. 
+type ClusterControlPlaneRootVolumeVolumeTypeEnum string + +// ClusterControlPlaneRootVolumeVolumeTypeEnumRef returns a *ClusterControlPlaneRootVolumeVolumeTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterControlPlaneRootVolumeVolumeTypeEnumRef(s string) *ClusterControlPlaneRootVolumeVolumeTypeEnum { + v := ClusterControlPlaneRootVolumeVolumeTypeEnum(s) + return &v +} + +func (v ClusterControlPlaneRootVolumeVolumeTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"VOLUME_TYPE_UNSPECIFIED", "GP2", "GP3"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterControlPlaneRootVolumeVolumeTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterControlPlaneMainVolumeVolumeTypeEnum. +type ClusterControlPlaneMainVolumeVolumeTypeEnum string + +// ClusterControlPlaneMainVolumeVolumeTypeEnumRef returns a *ClusterControlPlaneMainVolumeVolumeTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterControlPlaneMainVolumeVolumeTypeEnumRef(s string) *ClusterControlPlaneMainVolumeVolumeTypeEnum { + v := ClusterControlPlaneMainVolumeVolumeTypeEnum(s) + return &v +} + +func (v ClusterControlPlaneMainVolumeVolumeTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"VOLUME_TYPE_UNSPECIFIED", "GP2", "GP3"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterControlPlaneMainVolumeVolumeTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- if ne $.TargetVersionName "ga" }} +// The enum ClusterControlPlaneInstancePlacementTenancyEnum. 
+type ClusterControlPlaneInstancePlacementTenancyEnum string + +// ClusterControlPlaneInstancePlacementTenancyEnumRef returns a *ClusterControlPlaneInstancePlacementTenancyEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterControlPlaneInstancePlacementTenancyEnumRef(s string) *ClusterControlPlaneInstancePlacementTenancyEnum { + v := ClusterControlPlaneInstancePlacementTenancyEnum(s) + return &v +} + +func (v ClusterControlPlaneInstancePlacementTenancyEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TENANCY_UNSPECIFIED", "DEFAULT", "DEDICATED", "HOST"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterControlPlaneInstancePlacementTenancyEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- end }} +// The enum ClusterStateEnum. +type ClusterStateEnum string + +// ClusterStateEnumRef returns a *ClusterStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterStateEnumRef(s string) *ClusterStateEnum { + v := ClusterStateEnum(s) + return &v +} + +func (v ClusterStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStateEnum", +{{- if ne $.TargetVersionName "ga" }} + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterLoggingConfigComponentConfigEnableComponentsEnum. +type ClusterLoggingConfigComponentConfigEnableComponentsEnum string + +// ClusterLoggingConfigComponentConfigEnableComponentsEnumRef returns a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s string) *ClusterLoggingConfigComponentConfigEnableComponentsEnum { + v := ClusterLoggingConfigComponentConfigEnableComponentsEnum(s) + return &v +} + +func (v ClusterLoggingConfigComponentConfigEnableComponentsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"COMPONENT_UNSPECIFIED", "SYSTEM_COMPONENTS", "WORKLOADS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterLoggingConfigComponentConfigEnableComponentsEnum", +{{- end }} + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterBinaryAuthorizationEvaluationModeEnum. +type ClusterBinaryAuthorizationEvaluationModeEnum string + +// ClusterBinaryAuthorizationEvaluationModeEnumRef returns a *ClusterBinaryAuthorizationEvaluationModeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterBinaryAuthorizationEvaluationModeEnumRef(s string) *ClusterBinaryAuthorizationEvaluationModeEnum { + v := ClusterBinaryAuthorizationEvaluationModeEnum(s) + return &v +} + +func (v ClusterBinaryAuthorizationEvaluationModeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterBinaryAuthorizationEvaluationModeEnum", + Value: string(v), + Valid: []string{}, + } +} + +type ClusterNetworking struct { + empty bool `json:"-"` + VPCId *string `json:"vpcId"` + PodAddressCidrBlocks []string `json:"podAddressCidrBlocks"` + ServiceAddressCidrBlocks []string `json:"serviceAddressCidrBlocks"` + PerNodePoolSgRulesDisabled *bool `json:"perNodePoolSgRulesDisabled"` +} + +type jsonClusterNetworking ClusterNetworking + +func (r *ClusterNetworking) UnmarshalJSON(data []byte) error { + var res jsonClusterNetworking + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterNetworking + } else { + + r.VPCId = res.VPCId + + r.PodAddressCidrBlocks = res.PodAddressCidrBlocks + + r.ServiceAddressCidrBlocks = res.ServiceAddressCidrBlocks + + r.PerNodePoolSgRulesDisabled = res.PerNodePoolSgRulesDisabled + + } + return nil +} + +// This object is used to assert a desired state where this ClusterNetworking is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterNetworking *ClusterNetworking = &ClusterNetworking{empty: true} + +func (r *ClusterNetworking) Empty() bool { + return r.empty +} + +func (r *ClusterNetworking) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterNetworking) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlane struct { + empty bool `json:"-"` + Version *string `json:"version"` + InstanceType *string `json:"instanceType"` + SshConfig *ClusterControlPlaneSshConfig `json:"sshConfig"` + SubnetIds []string `json:"subnetIds"` + ConfigEncryption *ClusterControlPlaneConfigEncryption `json:"configEncryption"` + SecurityGroupIds []string `json:"securityGroupIds"` + IamInstanceProfile *string `json:"iamInstanceProfile"` + RootVolume *ClusterControlPlaneRootVolume `json:"rootVolume"` + MainVolume *ClusterControlPlaneMainVolume `json:"mainVolume"` + DatabaseEncryption *ClusterControlPlaneDatabaseEncryption `json:"databaseEncryption"` + Tags map[string]string `json:"tags"` + AwsServicesAuthentication *ClusterControlPlaneAwsServicesAuthentication `json:"awsServicesAuthentication"` + ProxyConfig *ClusterControlPlaneProxyConfig `json:"proxyConfig"` +{{- if ne $.TargetVersionName "ga" }} + InstancePlacement *ClusterControlPlaneInstancePlacement `json:"instancePlacement"` +{{- end }} +} + +type jsonClusterControlPlane ClusterControlPlane + +func (r *ClusterControlPlane) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlane + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlane + } else { + + r.Version = res.Version + + r.InstanceType = res.InstanceType + + r.SshConfig = res.SshConfig + + r.SubnetIds = res.SubnetIds + + r.ConfigEncryption = 
res.ConfigEncryption + + r.SecurityGroupIds = res.SecurityGroupIds + + r.IamInstanceProfile = res.IamInstanceProfile + + r.RootVolume = res.RootVolume + + r.MainVolume = res.MainVolume + + r.DatabaseEncryption = res.DatabaseEncryption + + r.Tags = res.Tags + + r.AwsServicesAuthentication = res.AwsServicesAuthentication + + r.ProxyConfig = res.ProxyConfig +{{- if ne $.TargetVersionName "ga" }} + + r.InstancePlacement = res.InstancePlacement +{{- end }} + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlane is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterControlPlane *ClusterControlPlane = &ClusterControlPlane{empty: true} + +func (r *ClusterControlPlane) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlane) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlane) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneSshConfig struct { + empty bool `json:"-"` + Ec2KeyPair *string `json:"ec2KeyPair"` +} + +type jsonClusterControlPlaneSshConfig ClusterControlPlaneSshConfig + +func (r *ClusterControlPlaneSshConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneSshConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneSshConfig + } else { + + r.Ec2KeyPair = res.Ec2KeyPair + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneSshConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyClusterControlPlaneSshConfig *ClusterControlPlaneSshConfig = &ClusterControlPlaneSshConfig{empty: true} + +func (r *ClusterControlPlaneSshConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneConfigEncryption struct { + empty bool `json:"-"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneConfigEncryption ClusterControlPlaneConfigEncryption + +func (r *ClusterControlPlaneConfigEncryption) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneConfigEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneConfigEncryption + } else { + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneConfigEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneConfigEncryption *ClusterControlPlaneConfigEncryption = &ClusterControlPlaneConfigEncryption{empty: true} + +func (r *ClusterControlPlaneConfigEncryption) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneConfigEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneConfigEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` + VolumeType *ClusterControlPlaneRootVolumeVolumeTypeEnum `json:"volumeType"` + Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneRootVolume ClusterControlPlaneRootVolume + +func (r *ClusterControlPlaneRootVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneRootVolume + } else { + + r.SizeGib = res.SizeGib + + r.VolumeType = res.VolumeType + + r.Iops = res.Iops + + r.Throughput = res.Throughput + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneRootVolume *ClusterControlPlaneRootVolume = &ClusterControlPlaneRootVolume{empty: true} + +func (r *ClusterControlPlaneRootVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneMainVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` + VolumeType *ClusterControlPlaneMainVolumeVolumeTypeEnum `json:"volumeType"` + Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneMainVolume ClusterControlPlaneMainVolume + +func (r *ClusterControlPlaneMainVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneMainVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneMainVolume + } else { + + r.SizeGib = res.SizeGib + + r.VolumeType = res.VolumeType + + r.Iops = res.Iops + + r.Throughput = res.Throughput + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneMainVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneMainVolume *ClusterControlPlaneMainVolume = &ClusterControlPlaneMainVolume{empty: true} + +func (r *ClusterControlPlaneMainVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneMainVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneMainVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneDatabaseEncryption struct { + empty bool `json:"-"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneDatabaseEncryption ClusterControlPlaneDatabaseEncryption + +func (r *ClusterControlPlaneDatabaseEncryption) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneDatabaseEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneDatabaseEncryption + } else { + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneDatabaseEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneDatabaseEncryption *ClusterControlPlaneDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{empty: true} + +func (r *ClusterControlPlaneDatabaseEncryption) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneDatabaseEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneDatabaseEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneAwsServicesAuthentication struct { + empty bool `json:"-"` + RoleArn *string `json:"roleArn"` + RoleSessionName *string `json:"roleSessionName"` +} + +type jsonClusterControlPlaneAwsServicesAuthentication ClusterControlPlaneAwsServicesAuthentication + +func (r *ClusterControlPlaneAwsServicesAuthentication) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneAwsServicesAuthentication + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneAwsServicesAuthentication + } else { + + r.RoleArn = res.RoleArn + + r.RoleSessionName = res.RoleSessionName + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneAwsServicesAuthentication is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneAwsServicesAuthentication *ClusterControlPlaneAwsServicesAuthentication = &ClusterControlPlaneAwsServicesAuthentication{empty: true} + +func (r *ClusterControlPlaneAwsServicesAuthentication) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneAwsServicesAuthentication) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneAwsServicesAuthentication) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneProxyConfig struct { + empty bool `json:"-"` + SecretArn *string `json:"secretArn"` + SecretVersion *string `json:"secretVersion"` +} + +type jsonClusterControlPlaneProxyConfig ClusterControlPlaneProxyConfig + +func (r *ClusterControlPlaneProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneProxyConfig + } else { + + r.SecretArn = res.SecretArn + + r.SecretVersion = res.SecretVersion + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneProxyConfig *ClusterControlPlaneProxyConfig = &ClusterControlPlaneProxyConfig{empty: true} + +func (r *ClusterControlPlaneProxyConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneProxyConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- if ne $.TargetVersionName "ga" }} +type ClusterControlPlaneInstancePlacement struct { + empty bool `json:"-"` + Tenancy *ClusterControlPlaneInstancePlacementTenancyEnum `json:"tenancy"` +} + +type jsonClusterControlPlaneInstancePlacement ClusterControlPlaneInstancePlacement + +func (r *ClusterControlPlaneInstancePlacement) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneInstancePlacement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneInstancePlacement + } else { + + r.Tenancy = res.Tenancy + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneInstancePlacement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneInstancePlacement *ClusterControlPlaneInstancePlacement = &ClusterControlPlaneInstancePlacement{empty: true} + +func (r *ClusterControlPlaneInstancePlacement) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneInstancePlacement) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneInstancePlacement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- end }} +type ClusterAuthorization struct { + empty bool `json:"-"` + AdminUsers []ClusterAuthorizationAdminUsers `json:"adminUsers"` + AdminGroups []ClusterAuthorizationAdminGroups `json:"adminGroups"` +} + +type jsonClusterAuthorization ClusterAuthorization + +func (r *ClusterAuthorization) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorization + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorization + } else { + + r.AdminUsers = res.AdminUsers + + r.AdminGroups = res.AdminGroups + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorization is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorization *ClusterAuthorization = &ClusterAuthorization{empty: true} + +func (r *ClusterAuthorization) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorization) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorization) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminUsers struct { + empty bool `json:"-"` + Username *string `json:"username"` +} + +type jsonClusterAuthorizationAdminUsers ClusterAuthorizationAdminUsers + +func (r *ClusterAuthorizationAdminUsers) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminUsers + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminUsers + } else { + + r.Username = res.Username + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminUsers is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminUsers *ClusterAuthorizationAdminUsers = &ClusterAuthorizationAdminUsers{empty: true} + +func (r *ClusterAuthorizationAdminUsers) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminUsers) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminUsers) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminGroups struct { + empty bool `json:"-"` + Group *string `json:"group"` +} + +type jsonClusterAuthorizationAdminGroups ClusterAuthorizationAdminGroups + +func (r *ClusterAuthorizationAdminGroups) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminGroups + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminGroups + } else { + + r.Group = res.Group + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminGroups is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminGroups *ClusterAuthorizationAdminGroups = &ClusterAuthorizationAdminGroups{empty: true} + +func (r *ClusterAuthorizationAdminGroups) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminGroups) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminGroups) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterWorkloadIdentityConfig struct { + empty bool `json:"-"` + IssuerUri *string `json:"issuerUri"` + WorkloadPool *string `json:"workloadPool"` + IdentityProvider *string `json:"identityProvider"` +} + +type jsonClusterWorkloadIdentityConfig ClusterWorkloadIdentityConfig + +func (r *ClusterWorkloadIdentityConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterWorkloadIdentityConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterWorkloadIdentityConfig + } else { + + r.IssuerUri = res.IssuerUri + + r.WorkloadPool = res.WorkloadPool + + r.IdentityProvider = res.IdentityProvider + + } + return nil +} + +// This object is used to assert a desired state where this ClusterWorkloadIdentityConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterWorkloadIdentityConfig *ClusterWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{empty: true} + +func (r *ClusterWorkloadIdentityConfig) Empty() bool { + return r.empty +} + +func (r *ClusterWorkloadIdentityConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterWorkloadIdentityConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterFleet struct { + empty bool `json:"-"` + Project *string `json:"project"` + Membership *string `json:"membership"` +} + +type jsonClusterFleet ClusterFleet + +func (r *ClusterFleet) UnmarshalJSON(data []byte) error { + var res jsonClusterFleet + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterFleet + } else { + + r.Project = res.Project + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this ClusterFleet is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterFleet *ClusterFleet = &ClusterFleet{empty: true} + +func (r *ClusterFleet) Empty() bool { + return r.empty +} + +func (r *ClusterFleet) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterFleet) HashCode() string { +{{- if ne $.TargetVersionName "ga" }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterLoggingConfig struct { + empty bool `json:"-"` + ComponentConfig *ClusterLoggingConfigComponentConfig `json:"componentConfig"` +} + +type jsonClusterLoggingConfig ClusterLoggingConfig + +func (r *ClusterLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfig + } else { + + r.ComponentConfig = res.ComponentConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfig *ClusterLoggingConfig = &ClusterLoggingConfig{empty: true} + +func (r *ClusterLoggingConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterLoggingConfigComponentConfig struct { + empty bool `json:"-"` + EnableComponents []ClusterLoggingConfigComponentConfigEnableComponentsEnum `json:"enableComponents"` +} + +type jsonClusterLoggingConfigComponentConfig ClusterLoggingConfigComponentConfig + +func (r *ClusterLoggingConfigComponentConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfigComponentConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfigComponentConfig + } else { + + r.EnableComponents = res.EnableComponents + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfigComponentConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfigComponentConfig *ClusterLoggingConfigComponentConfig = &ClusterLoggingConfigComponentConfig{empty: true} + +func (r *ClusterLoggingConfigComponentConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfigComponentConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfigComponentConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfig struct { + empty bool `json:"-"` + ManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig `json:"managedPrometheusConfig"` +} + +type jsonClusterMonitoringConfig ClusterMonitoringConfig + +func (r *ClusterMonitoringConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfig + } else { + + r.ManagedPrometheusConfig = res.ManagedPrometheusConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfig *ClusterMonitoringConfig = &ClusterMonitoringConfig{empty: true} + +func (r *ClusterMonitoringConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfigManagedPrometheusConfig struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` +} + +type jsonClusterMonitoringConfigManagedPrometheusConfig ClusterMonitoringConfigManagedPrometheusConfig + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfigManagedPrometheusConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfigManagedPrometheusConfig + } else { + + r.Enabled = res.Enabled + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfigManagedPrometheusConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfigManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{empty: true} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) HashCode() string { +{{- end }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterBinaryAuthorization struct { + empty bool `json:"-"` + EvaluationMode *ClusterBinaryAuthorizationEvaluationModeEnum `json:"evaluationMode"` +} + +type jsonClusterBinaryAuthorization ClusterBinaryAuthorization + +func (r *ClusterBinaryAuthorization) UnmarshalJSON(data []byte) error { + var res jsonClusterBinaryAuthorization + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterBinaryAuthorization + } else { + + r.EvaluationMode = res.EvaluationMode + + } + return nil +} + +// This object is used to assert a desired state where this ClusterBinaryAuthorization is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterBinaryAuthorization *ClusterBinaryAuthorization = &ClusterBinaryAuthorization{empty: true} + +func (r *ClusterBinaryAuthorization) Empty() bool { + return r.empty +} + +func (r *ClusterBinaryAuthorization) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterBinaryAuthorization) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Cluster) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_aws", + Type: "Cluster", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containeraws", +{{- end }} + } +} + +func (r *Cluster) ID() (string, error) { + if err := extractClusterFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "description": dcl.ValueOrEmptyString(nr.Description), + "networking": dcl.ValueOrEmptyString(nr.Networking), + "aws_region": dcl.ValueOrEmptyString(nr.AwsRegion), + "control_plane": dcl.ValueOrEmptyString(nr.ControlPlane), + "authorization": dcl.ValueOrEmptyString(nr.Authorization), + "state": dcl.ValueOrEmptyString(nr.State), + "endpoint": dcl.ValueOrEmptyString(nr.Endpoint), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "workload_identity_config": dcl.ValueOrEmptyString(nr.WorkloadIdentityConfig), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), 
+ "fleet": dcl.ValueOrEmptyString(nr.Fleet), +{{- if ne $.TargetVersionName "ga" }} + "logging_config": dcl.ValueOrEmptyString(nr.LoggingConfig), + "monitoring_config": dcl.ValueOrEmptyString(nr.MonitoringConfig), +{{- end }} + "binary_authorization": dcl.ValueOrEmptyString(nr.BinaryAuthorization), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ClusterMaxPage = -1 + +type ClusterList struct { + Items []*Cluster + + nextToken string + + pageSize int32 + + resource *Cluster +} + +func (l *ClusterList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ClusterList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listCluster(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListCluster(ctx context.Context, project, location string) (*ClusterList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListClusterWithMaxResults(ctx, project, location, ClusterMaxPage) + +} + +func (c *Client) ListClusterWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*ClusterList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &Cluster{ + Project: &project, + Location: &location, + } + items, token, err := c.listCluster(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ClusterList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractClusterFields(r) + + b, err := c.getClusterRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalCluster(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeClusterNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractClusterFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteCluster(ctx context.Context, r *Cluster) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Cluster resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Cluster...") + deleteOp := deleteClusterOperation{} + return deleteOp.do(ctx, r, c) +} + +// 
DeleteAllCluster deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllCluster(ctx context.Context, project, location string, filter func(*Cluster) bool) error { + listObj, err := c.ListCluster(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyCluster(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Cluster + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyClusterHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyClusterHelper(c *Client, ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyCluster...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractClusterFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.clusterDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToClusterDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []clusterApiOperation + if create { + ops = append(ops, &createClusterOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyClusterDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyClusterDiff(c *Client, ctx context.Context, desired *Cluster, rawDesired *Cluster, ops []clusterApiOperation, opts ...dcl.ApplyOption) (*Cluster, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetCluster(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createClusterOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapCluster(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeClusterNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeClusterNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeClusterDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractClusterFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractClusterFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffCluster(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/cluster_internal.go.tmpl b/mmv1/third_party/terraform/services/containeraws/cluster_internal.go.tmpl new file mode 100644 index 000000000000..bfb5df994069 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/cluster_internal.go.tmpl @@ -0,0 +1,7909 @@ +package containeraws + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Cluster) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "networking"); err != nil { + return err + } + if err := dcl.Required(r, "awsRegion"); err != nil { + return err + } + if err := dcl.Required(r, "controlPlane"); err != nil { + return err + } + if err := dcl.Required(r, "authorization"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.Required(r, "fleet"); err != nil { + 
return err + } + if !dcl.IsEmptyValueIndirect(r.Networking) { + if err := r.Networking.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ControlPlane) { + if err := r.ControlPlane.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Authorization) { + if err := r.Authorization.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkloadIdentityConfig) { + if err := r.WorkloadIdentityConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Fleet) { + if err := r.Fleet.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MonitoringConfig) { + if err := r.MonitoringConfig.validate(); err != nil { + return err + } + } +{{- end }} + if !dcl.IsEmptyValueIndirect(r.BinaryAuthorization) { + if err := r.BinaryAuthorization.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterNetworking) validate() error { + if err := dcl.Required(r, "vpcId"); err != nil { + return err + } + if err := dcl.Required(r, "podAddressCidrBlocks"); err != nil { + return err + } + if err := dcl.Required(r, "serviceAddressCidrBlocks"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlane) validate() error { + if err := dcl.Required(r, "version"); err != nil { + return err + } + if err := dcl.Required(r, "subnetIds"); err != nil { + return err + } + if err := dcl.Required(r, "configEncryption"); err != nil { + return err + } + if err := dcl.Required(r, "iamInstanceProfile"); err != nil { + return err + } + if err := dcl.Required(r, "databaseEncryption"); err != nil { + return err + } + if err := dcl.Required(r, "awsServicesAuthentication"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != 
nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ConfigEncryption) { + if err := r.ConfigEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MainVolume) { + if err := r.MainVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DatabaseEncryption) { + if err := r.DatabaseEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AwsServicesAuthentication) { + if err := r.AwsServicesAuthentication.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.InstancePlacement) { + if err := r.InstancePlacement.validate(); err != nil { + return err + } + } +{{- end }} + return nil +} +func (r *ClusterControlPlaneSshConfig) validate() error { + if err := dcl.Required(r, "ec2KeyPair"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneConfigEncryption) validate() error { + if err := dcl.Required(r, "kmsKeyArn"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneRootVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneMainVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneDatabaseEncryption) validate() error { + if err := dcl.Required(r, "kmsKeyArn"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneAwsServicesAuthentication) validate() error { + if err := dcl.Required(r, "roleArn"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneProxyConfig) validate() error { + if err := dcl.Required(r, "secretArn"); err != nil { + return err + } + if err := dcl.Required(r, "secretVersion"); err != nil { + return err + } + return nil +} +{{- if 
ne $.TargetVersionName "ga" }} +func (r *ClusterControlPlaneInstancePlacement) validate() error { + return nil +} +{{- end }} +func (r *ClusterAuthorization) validate() error { + if err := dcl.Required(r, "adminUsers"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorizationAdminUsers) validate() error { + if err := dcl.Required(r, "username"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorizationAdminGroups) validate() error { + if err := dcl.Required(r, "group"); err != nil { + return err + } + return nil +} +func (r *ClusterWorkloadIdentityConfig) validate() error { + return nil +} +func (r *ClusterFleet) validate() error { + if err := dcl.Required(r, "project"); err != nil { + return err + } +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func (r *ClusterLoggingConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ComponentConfig) { + if err := r.ComponentConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterLoggingConfigComponentConfig) validate() error { + return nil +} +func (r *ClusterMonitoringConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ManagedPrometheusConfig) { + if err := r.ManagedPrometheusConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterMonitoringConfigManagedPrometheusConfig) validate() error { +{{- end }} + return nil +} +func (r *ClusterBinaryAuthorization) validate() error { + return nil +} +func (r *Cluster) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *Cluster) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return 
dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Cluster) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters?awsClusterId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// clusterApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type clusterApiOperation interface { + do(context.Context, *Cluster, *Client) error +} + +// newUpdateClusterUpdateAwsClusterRequest creates a request for an +// Cluster resource's UpdateAwsCluster update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateClusterUpdateAwsClusterRequest(ctx context.Context, f *Cluster, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v, err := expandClusterNetworking(c, f.Networking, res); err != nil { + return nil, fmt.Errorf("error expanding Networking into networking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["networking"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["authorization"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["monitoringConfig"] = v +{{- end }} + } + if v, err := expandClusterBinaryAuthorization(c, f.BinaryAuthorization, res); err != nil { + return nil, fmt.Errorf("error expanding BinaryAuthorization into binaryAuthorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["binaryAuthorization"] = v + } + b, err := c.getClusterRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := 
json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateClusterUpdateAwsClusterRequest converts the update into +// the final JSON request body. +func marshalUpdateClusterUpdateAwsClusterRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateClusterUpdateAwsClusterOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateClusterUpdateAwsClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + _, err := c.GetCluster(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAwsCluster") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateClusterUpdateAwsClusterRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateClusterUpdateAwsClusterRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listClusterRaw(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ClusterMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listClusterOperation struct { + AwsClusters []map[string]interface{} `json:"awsClusters"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listCluster(ctx context.Context, r 
*Cluster, pageToken string, pageSize int32) ([]*Cluster, string, error) { + b, err := c.listClusterRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listClusterOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Cluster + for _, v := range m.AwsClusters { + res, err := unmarshalMapCluster(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllCluster(ctx context.Context, f func(*Cluster) bool, resources []*Cluster) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteCluster(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteClusterOperation struct{} + +func (op *deleteClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + r, err := c.GetCluster(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Cluster not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetCluster checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetCluster(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createClusterOperation struct { + response map[string]interface{} +} + +func (op *createClusterOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetCluster(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getClusterRaw(ctx context.Context, r *Cluster) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) clusterDiffsForRawDesired(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (initial, desired *Cluster, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Cluster + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Cluster); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Cluster, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetCluster(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Cluster resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Cluster resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Cluster resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Cluster: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Cluster: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractClusterFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeClusterInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Cluster: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Cluster: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffCluster(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeClusterInitialState(rawInitial, rawDesired *Cluster) (*Cluster, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeClusterDesiredState(rawDesired, rawInitial *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, nil, opts...) + rawDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, nil, opts...) + rawDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, nil, opts...) + rawDesired.WorkloadIdentityConfig = canonicalizeClusterWorkloadIdentityConfig(rawDesired.WorkloadIdentityConfig, nil, opts...) + rawDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, nil, opts...) +{{- if ne $.TargetVersionName "ga" }} + rawDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, nil, opts...) + rawDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, nil, opts...) +{{- end }} + rawDesired.BinaryAuthorization = canonicalizeClusterBinaryAuthorization(rawDesired.BinaryAuthorization, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &Cluster{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + canonicalDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, rawInitial.Networking, opts...) + if dcl.StringCanonicalize(rawDesired.AwsRegion, rawInitial.AwsRegion) { + canonicalDesired.AwsRegion = rawInitial.AwsRegion + } else { + canonicalDesired.AwsRegion = rawDesired.AwsRegion + } + canonicalDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, rawInitial.ControlPlane, opts...) + canonicalDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, rawInitial.Authorization, opts...) + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + canonicalDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, rawInitial.Fleet, opts...) 
+{{- if ne $.TargetVersionName "ga" }} + canonicalDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, rawInitial.LoggingConfig, opts...) + canonicalDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, rawInitial.MonitoringConfig, opts...) +{{- end }} + canonicalDesired.BinaryAuthorization = canonicalizeClusterBinaryAuthorization(rawDesired.BinaryAuthorization, rawInitial.BinaryAuthorization, opts...) + return canonicalDesired, nil +} + +func canonicalizeClusterNewState(c *Client, rawNew, rawDesired *Cluster) (*Cluster, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Networking) && dcl.IsEmptyValueIndirect(rawDesired.Networking) { + rawNew.Networking = rawDesired.Networking + } else { + rawNew.Networking = canonicalizeNewClusterNetworking(c, rawDesired.Networking, rawNew.Networking) + } + + if dcl.IsEmptyValueIndirect(rawNew.AwsRegion) && dcl.IsEmptyValueIndirect(rawDesired.AwsRegion) { + rawNew.AwsRegion = rawDesired.AwsRegion + } else { + if dcl.StringCanonicalize(rawDesired.AwsRegion, rawNew.AwsRegion) { + rawNew.AwsRegion = rawDesired.AwsRegion + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ControlPlane) && dcl.IsEmptyValueIndirect(rawDesired.ControlPlane) { + rawNew.ControlPlane = rawDesired.ControlPlane + } else { + rawNew.ControlPlane = canonicalizeNewClusterControlPlane(c, rawDesired.ControlPlane, rawNew.ControlPlane) + } + + if 
dcl.IsEmptyValueIndirect(rawNew.Authorization) && dcl.IsEmptyValueIndirect(rawDesired.Authorization) { + rawNew.Authorization = rawDesired.Authorization + } else { + rawNew.Authorization = canonicalizeNewClusterAuthorization(c, rawDesired.Authorization, rawNew.Authorization) + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Endpoint) && dcl.IsEmptyValueIndirect(rawDesired.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } else { + if dcl.StringCanonicalize(rawDesired.Endpoint, rawNew.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.WorkloadIdentityConfig) && 
dcl.IsEmptyValueIndirect(rawDesired.WorkloadIdentityConfig) { + rawNew.WorkloadIdentityConfig = rawDesired.WorkloadIdentityConfig + } else { + rawNew.WorkloadIdentityConfig = canonicalizeNewClusterWorkloadIdentityConfig(c, rawDesired.WorkloadIdentityConfig, rawNew.WorkloadIdentityConfig) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Fleet) && dcl.IsEmptyValueIndirect(rawDesired.Fleet) { + rawNew.Fleet = rawDesired.Fleet + } else { + rawNew.Fleet = canonicalizeNewClusterFleet(c, rawDesired.Fleet, rawNew.Fleet) +{{- if ne $.TargetVersionName "ga" }} + } + + if dcl.IsEmptyValueIndirect(rawNew.LoggingConfig) && dcl.IsEmptyValueIndirect(rawDesired.LoggingConfig) { + rawNew.LoggingConfig = rawDesired.LoggingConfig + } else { + rawNew.LoggingConfig = canonicalizeNewClusterLoggingConfig(c, rawDesired.LoggingConfig, rawNew.LoggingConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.MonitoringConfig) && dcl.IsEmptyValueIndirect(rawDesired.MonitoringConfig) { + rawNew.MonitoringConfig = rawDesired.MonitoringConfig + } else { + rawNew.MonitoringConfig = canonicalizeNewClusterMonitoringConfig(c, rawDesired.MonitoringConfig, rawNew.MonitoringConfig) +{{- end }} + } + + if dcl.IsEmptyValueIndirect(rawNew.BinaryAuthorization) && dcl.IsEmptyValueIndirect(rawDesired.BinaryAuthorization) { + rawNew.BinaryAuthorization = rawDesired.BinaryAuthorization + } else { + rawNew.BinaryAuthorization = canonicalizeNewClusterBinaryAuthorization(c, rawDesired.BinaryAuthorization, rawNew.BinaryAuthorization) + } + + return rawNew, nil +} + +func canonicalizeClusterNetworking(des, initial *ClusterNetworking, opts ...dcl.ApplyOption) *ClusterNetworking { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterNetworking{} + + if dcl.StringCanonicalize(des.VPCId, initial.VPCId) || dcl.IsZeroValue(des.VPCId) { + cDes.VPCId = initial.VPCId + } else { 
+ cDes.VPCId = des.VPCId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, initial.PodAddressCidrBlocks) { + cDes.PodAddressCidrBlocks = initial.PodAddressCidrBlocks + } else { + cDes.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, initial.ServiceAddressCidrBlocks) { + cDes.ServiceAddressCidrBlocks = initial.ServiceAddressCidrBlocks + } else { + cDes.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + if dcl.BoolCanonicalize(des.PerNodePoolSgRulesDisabled, initial.PerNodePoolSgRulesDisabled) || dcl.IsZeroValue(des.PerNodePoolSgRulesDisabled) { + cDes.PerNodePoolSgRulesDisabled = initial.PerNodePoolSgRulesDisabled + } else { + cDes.PerNodePoolSgRulesDisabled = des.PerNodePoolSgRulesDisabled + } + + return cDes +} + +func canonicalizeClusterNetworkingSlice(des, initial []ClusterNetworking, opts ...dcl.ApplyOption) []ClusterNetworking { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterNetworking, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterNetworking(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterNetworking, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterNetworking(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterNetworking(c *Client, des, nw *ClusterNetworking) *ClusterNetworking { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterNetworking while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.VPCId, nw.VPCId) { + nw.VPCId = des.VPCId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, nw.PodAddressCidrBlocks) { + nw.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, nw.ServiceAddressCidrBlocks) { + nw.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + if dcl.BoolCanonicalize(des.PerNodePoolSgRulesDisabled, nw.PerNodePoolSgRulesDisabled) { + nw.PerNodePoolSgRulesDisabled = des.PerNodePoolSgRulesDisabled + } + + return nw +} + +func canonicalizeNewClusterNetworkingSet(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterNetworking + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterNetworkingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterNetworkingSlice(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterNetworking + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlane(des, initial *ClusterControlPlane, opts ...dcl.ApplyOption) *ClusterControlPlane { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlane{} + + if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { + cDes.Version = initial.Version + } else { + cDes.Version = des.Version + } + if dcl.StringCanonicalize(des.InstanceType, initial.InstanceType) || dcl.IsZeroValue(des.InstanceType) { + cDes.InstanceType = initial.InstanceType + } else { + cDes.InstanceType = des.InstanceType + } + cDes.SshConfig = canonicalizeClusterControlPlaneSshConfig(des.SshConfig, initial.SshConfig, opts...) + if dcl.StringArrayCanonicalize(des.SubnetIds, initial.SubnetIds) { + cDes.SubnetIds = initial.SubnetIds + } else { + cDes.SubnetIds = des.SubnetIds + } + cDes.ConfigEncryption = canonicalizeClusterControlPlaneConfigEncryption(des.ConfigEncryption, initial.ConfigEncryption, opts...) + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, initial.SecurityGroupIds) { + cDes.SecurityGroupIds = initial.SecurityGroupIds + } else { + cDes.SecurityGroupIds = des.SecurityGroupIds + } + if dcl.StringCanonicalize(des.IamInstanceProfile, initial.IamInstanceProfile) || dcl.IsZeroValue(des.IamInstanceProfile) { + cDes.IamInstanceProfile = initial.IamInstanceProfile + } else { + cDes.IamInstanceProfile = des.IamInstanceProfile + } + cDes.RootVolume = canonicalizeClusterControlPlaneRootVolume(des.RootVolume, initial.RootVolume, opts...) + cDes.MainVolume = canonicalizeClusterControlPlaneMainVolume(des.MainVolume, initial.MainVolume, opts...) 
+ cDes.DatabaseEncryption = canonicalizeClusterControlPlaneDatabaseEncryption(des.DatabaseEncryption, initial.DatabaseEncryption, opts...) + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + cDes.AwsServicesAuthentication = canonicalizeClusterControlPlaneAwsServicesAuthentication(des.AwsServicesAuthentication, initial.AwsServicesAuthentication, opts...) + cDes.ProxyConfig = canonicalizeClusterControlPlaneProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.InstancePlacement = canonicalizeClusterControlPlaneInstancePlacement(des.InstancePlacement, initial.InstancePlacement, opts...) +{{- end }} + + return cDes +} + +func canonicalizeClusterControlPlaneSlice(des, initial []ClusterControlPlane, opts ...dcl.ApplyOption) []ClusterControlPlane { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlane, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlane(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlane, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlane(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlane(c *Client, des, nw *ClusterControlPlane) *ClusterControlPlane { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlane while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Version, nw.Version) { + nw.Version = des.Version + } + if dcl.StringCanonicalize(des.InstanceType, nw.InstanceType) { + nw.InstanceType = des.InstanceType + } + nw.SshConfig = canonicalizeNewClusterControlPlaneSshConfig(c, des.SshConfig, nw.SshConfig) + if dcl.StringArrayCanonicalize(des.SubnetIds, nw.SubnetIds) { + nw.SubnetIds = des.SubnetIds + } + nw.ConfigEncryption = canonicalizeNewClusterControlPlaneConfigEncryption(c, des.ConfigEncryption, nw.ConfigEncryption) + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, nw.SecurityGroupIds) { + nw.SecurityGroupIds = des.SecurityGroupIds + } + if dcl.StringCanonicalize(des.IamInstanceProfile, nw.IamInstanceProfile) { + nw.IamInstanceProfile = des.IamInstanceProfile + } + nw.RootVolume = canonicalizeNewClusterControlPlaneRootVolume(c, des.RootVolume, nw.RootVolume) + nw.MainVolume = canonicalizeNewClusterControlPlaneMainVolume(c, des.MainVolume, nw.MainVolume) + nw.DatabaseEncryption = canonicalizeNewClusterControlPlaneDatabaseEncryption(c, des.DatabaseEncryption, nw.DatabaseEncryption) + nw.AwsServicesAuthentication = canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c, des.AwsServicesAuthentication, nw.AwsServicesAuthentication) + nw.ProxyConfig = canonicalizeNewClusterControlPlaneProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.InstancePlacement = canonicalizeNewClusterControlPlaneInstancePlacement(c, des.InstancePlacement, nw.InstancePlacement) +{{- end }} + + return nw +} + +func canonicalizeNewClusterControlPlaneSet(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterControlPlane + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSlice(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlane + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneSshConfig(des, initial *ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneSshConfig{} + + if dcl.StringCanonicalize(des.Ec2KeyPair, initial.Ec2KeyPair) || dcl.IsZeroValue(des.Ec2KeyPair) { + cDes.Ec2KeyPair = initial.Ec2KeyPair + } else { + cDes.Ec2KeyPair = des.Ec2KeyPair + } + + return cDes +} + +func canonicalizeClusterControlPlaneSshConfigSlice(des, initial []ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneSshConfig(c *Client, des, nw *ClusterControlPlaneSshConfig) *ClusterControlPlaneSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneSshConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Ec2KeyPair, nw.Ec2KeyPair) { + nw.Ec2KeyPair = des.Ec2KeyPair + } + + return nw +} + +func canonicalizeNewClusterControlPlaneSshConfigSet(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSshConfigSlice(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneConfigEncryption(des, initial *ClusterControlPlaneConfigEncryption, opts ...dcl.ApplyOption) *ClusterControlPlaneConfigEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneConfigEncryption{} + + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneConfigEncryptionSlice(des, initial []ClusterControlPlaneConfigEncryption, opts ...dcl.ApplyOption) []ClusterControlPlaneConfigEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneConfigEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneConfigEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneConfigEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneConfigEncryption(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneConfigEncryption(c *Client, des, nw *ClusterControlPlaneConfigEncryption) *ClusterControlPlaneConfigEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneConfigEncryption while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneConfigEncryptionSet(c *Client, des, nw []ClusterControlPlaneConfigEncryption) []ClusterControlPlaneConfigEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneConfigEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneConfigEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneConfigEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneConfigEncryptionSlice(c *Client, des, nw []ClusterControlPlaneConfigEncryption) []ClusterControlPlaneConfigEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneConfigEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneConfigEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneRootVolume(des, initial *ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + if dcl.IsZeroValue(des.VolumeType) || (dcl.IsEmptyValueIndirect(des.VolumeType) && dcl.IsEmptyValueIndirect(initial.VolumeType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.VolumeType = initial.VolumeType + } else { + cDes.VolumeType = des.VolumeType + } + if dcl.IsZeroValue(des.Iops) || (dcl.IsEmptyValueIndirect(des.Iops) && dcl.IsEmptyValueIndirect(initial.Iops)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Iops = initial.Iops + } else { + cDes.Iops = des.Iops + } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneRootVolumeSlice(des, initial []ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneRootVolume(c *Client, des, nw *ClusterControlPlaneRootVolume) *ClusterControlPlaneRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneRootVolume while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneRootVolumeSet(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterControlPlaneRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneRootVolumeSlice(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneMainVolume(des, initial *ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneMainVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneMainVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + if dcl.IsZeroValue(des.VolumeType) || (dcl.IsEmptyValueIndirect(des.VolumeType) && dcl.IsEmptyValueIndirect(initial.VolumeType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.VolumeType = initial.VolumeType + } else { + cDes.VolumeType = des.VolumeType + } + if dcl.IsZeroValue(des.Iops) || (dcl.IsEmptyValueIndirect(des.Iops) && dcl.IsEmptyValueIndirect(initial.Iops)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Iops = initial.Iops + } else { + cDes.Iops = des.Iops + } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneMainVolumeSlice(des, initial []ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneMainVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneMainVolume(c *Client, des, nw *ClusterControlPlaneMainVolume) *ClusterControlPlaneMainVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneMainVolume while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneMainVolumeSet(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneMainVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneMainVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneMainVolumeSlice(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneMainVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneDatabaseEncryption(des, initial *ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) *ClusterControlPlaneDatabaseEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneDatabaseEncryption{} + + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneDatabaseEncryptionSlice(des, initial []ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) []ClusterControlPlaneDatabaseEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryption(c *Client, des, nw *ClusterControlPlaneDatabaseEncryption) *ClusterControlPlaneDatabaseEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneDatabaseEncryption while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSet(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneDatabaseEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneDatabaseEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSlice(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneDatabaseEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneAwsServicesAuthentication(des, initial *ClusterControlPlaneAwsServicesAuthentication, opts ...dcl.ApplyOption) *ClusterControlPlaneAwsServicesAuthentication { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneAwsServicesAuthentication{} + + if dcl.StringCanonicalize(des.RoleArn, initial.RoleArn) || dcl.IsZeroValue(des.RoleArn) { + cDes.RoleArn = initial.RoleArn + } else { + cDes.RoleArn = des.RoleArn + } + if dcl.StringCanonicalize(des.RoleSessionName, initial.RoleSessionName) || dcl.IsZeroValue(des.RoleSessionName) { + cDes.RoleSessionName = initial.RoleSessionName + } else { + cDes.RoleSessionName = des.RoleSessionName + } + + return cDes +} + +func canonicalizeClusterControlPlaneAwsServicesAuthenticationSlice(des, initial []ClusterControlPlaneAwsServicesAuthentication, opts ...dcl.ApplyOption) []ClusterControlPlaneAwsServicesAuthentication { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneAwsServicesAuthentication, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneAwsServicesAuthentication(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneAwsServicesAuthentication, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneAwsServicesAuthentication(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c *Client, des, nw *ClusterControlPlaneAwsServicesAuthentication) *ClusterControlPlaneAwsServicesAuthentication { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneAwsServicesAuthentication while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.RoleArn, nw.RoleArn) { + nw.RoleArn = des.RoleArn + } + if dcl.StringCanonicalize(des.RoleSessionName, nw.RoleSessionName) { + nw.RoleSessionName = des.RoleSessionName + } + + return nw +} + +func canonicalizeNewClusterControlPlaneAwsServicesAuthenticationSet(c *Client, des, nw []ClusterControlPlaneAwsServicesAuthentication) []ClusterControlPlaneAwsServicesAuthentication { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneAwsServicesAuthentication + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneAwsServicesAuthenticationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneAwsServicesAuthenticationSlice(c *Client, des, nw []ClusterControlPlaneAwsServicesAuthentication) []ClusterControlPlaneAwsServicesAuthentication { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneAwsServicesAuthentication + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneProxyConfig(des, initial *ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneProxyConfig{} + + if dcl.StringCanonicalize(des.SecretArn, initial.SecretArn) || dcl.IsZeroValue(des.SecretArn) { + cDes.SecretArn = initial.SecretArn + } else { + cDes.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, initial.SecretVersion) || dcl.IsZeroValue(des.SecretVersion) { + cDes.SecretVersion = initial.SecretVersion + } else { + cDes.SecretVersion = des.SecretVersion + } + + return cDes +} + +func canonicalizeClusterControlPlaneProxyConfigSlice(des, initial []ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneProxyConfig(c *Client, des, nw *ClusterControlPlaneProxyConfig) *ClusterControlPlaneProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneProxyConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SecretArn, nw.SecretArn) { + nw.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, nw.SecretVersion) { + nw.SecretVersion = des.SecretVersion + } + + return nw +} + +func canonicalizeNewClusterControlPlaneProxyConfigSet(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneProxyConfigSlice(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &n)) + } + + return items +} + +{{- if ne $.TargetVersionName "ga" }} +func canonicalizeClusterControlPlaneInstancePlacement(des, initial *ClusterControlPlaneInstancePlacement, opts ...dcl.ApplyOption) *ClusterControlPlaneInstancePlacement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneInstancePlacement{} + + if dcl.IsZeroValue(des.Tenancy) || (dcl.IsEmptyValueIndirect(des.Tenancy) && dcl.IsEmptyValueIndirect(initial.Tenancy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tenancy = initial.Tenancy + } else { + cDes.Tenancy = des.Tenancy + } + + return cDes +} + +func canonicalizeClusterControlPlaneInstancePlacementSlice(des, initial []ClusterControlPlaneInstancePlacement, opts ...dcl.ApplyOption) []ClusterControlPlaneInstancePlacement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneInstancePlacement, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneInstancePlacement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneInstancePlacement, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneInstancePlacement(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneInstancePlacement(c *Client, des, nw *ClusterControlPlaneInstancePlacement) *ClusterControlPlaneInstancePlacement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneInstancePlacement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterControlPlaneInstancePlacementSet(c *Client, des, nw []ClusterControlPlaneInstancePlacement) []ClusterControlPlaneInstancePlacement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneInstancePlacement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneInstancePlacementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneInstancePlacement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneInstancePlacementSlice(c *Client, des, nw []ClusterControlPlaneInstancePlacement) []ClusterControlPlaneInstancePlacement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneInstancePlacement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneInstancePlacement(c, &d, &n)) + } + + return items +} + +{{- end }} +func canonicalizeClusterAuthorization(des, initial *ClusterAuthorization, opts ...dcl.ApplyOption) *ClusterAuthorization { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorization{} + + cDes.AdminUsers = canonicalizeClusterAuthorizationAdminUsersSlice(des.AdminUsers, initial.AdminUsers, opts...) + cDes.AdminGroups = canonicalizeClusterAuthorizationAdminGroupsSlice(des.AdminGroups, initial.AdminGroups, opts...) + + return cDes +} + +func canonicalizeClusterAuthorizationSlice(des, initial []ClusterAuthorization, opts ...dcl.ApplyOption) []ClusterAuthorization { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorization, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorization(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorization, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorization(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorization(c *Client, des, nw *ClusterAuthorization) *ClusterAuthorization { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorization while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.AdminUsers = canonicalizeNewClusterAuthorizationAdminUsersSlice(c, des.AdminUsers, nw.AdminUsers) + nw.AdminGroups = canonicalizeNewClusterAuthorizationAdminGroupsSlice(c, des.AdminGroups, nw.AdminGroups) + + return nw +} + +func canonicalizeNewClusterAuthorizationSet(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorization + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationSlice(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorization + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminUsers(des, initial *ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminUsers{} + + if dcl.StringCanonicalize(des.Username, initial.Username) || dcl.IsZeroValue(des.Username) { + cDes.Username = initial.Username + } else { + cDes.Username = des.Username + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminUsersSlice(des, initial []ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminUsers(c *Client, des, nw *ClusterAuthorizationAdminUsers) *ClusterAuthorizationAdminUsers { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminUsers while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Username, nw.Username) { + nw.Username = des.Username + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminUsersSet(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminUsers + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminUsersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminUsersSlice(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminUsers + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminGroups(des, initial *ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminGroups{} + + if dcl.StringCanonicalize(des.Group, initial.Group) || dcl.IsZeroValue(des.Group) { + cDes.Group = initial.Group + } else { + cDes.Group = des.Group + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminGroupsSlice(des, initial []ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminGroups(c *Client, des, nw *ClusterAuthorizationAdminGroups) *ClusterAuthorizationAdminGroups { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminGroups while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Group, nw.Group) { + nw.Group = des.Group + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSet(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminGroups + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminGroupsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSlice(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminGroups + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterWorkloadIdentityConfig(des, initial *ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) *ClusterWorkloadIdentityConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterWorkloadIdentityConfig{} + + if dcl.StringCanonicalize(des.IssuerUri, initial.IssuerUri) || dcl.IsZeroValue(des.IssuerUri) { + cDes.IssuerUri = initial.IssuerUri + } else { + cDes.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, initial.WorkloadPool) || dcl.IsZeroValue(des.WorkloadPool) { + cDes.WorkloadPool = initial.WorkloadPool + } else { + cDes.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, initial.IdentityProvider) || dcl.IsZeroValue(des.IdentityProvider) { + cDes.IdentityProvider = initial.IdentityProvider + } else { + cDes.IdentityProvider = des.IdentityProvider + } + + return cDes +} + +func canonicalizeClusterWorkloadIdentityConfigSlice(des, initial []ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) []ClusterWorkloadIdentityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterWorkloadIdentityConfig(c *Client, des, nw *ClusterWorkloadIdentityConfig) *ClusterWorkloadIdentityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterWorkloadIdentityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.IssuerUri, nw.IssuerUri) { + nw.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, nw.WorkloadPool) { + nw.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, nw.IdentityProvider) { + nw.IdentityProvider = des.IdentityProvider + } + + return nw +} + +func canonicalizeNewClusterWorkloadIdentityConfigSet(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterWorkloadIdentityConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterWorkloadIdentityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterWorkloadIdentityConfigSlice(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterWorkloadIdentityConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterFleet(des, initial *ClusterFleet, opts ...dcl.ApplyOption) *ClusterFleet { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterFleet{} + + if dcl.PartialSelfLinkToSelfLink(des.Project, initial.Project) || dcl.IsZeroValue(des.Project) { + cDes.Project = initial.Project + } else { + cDes.Project = des.Project + } + + return cDes +} + +func canonicalizeClusterFleetSlice(des, initial []ClusterFleet, opts ...dcl.ApplyOption) []ClusterFleet { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterFleet, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterFleet(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterFleet, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterFleet(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterFleet(c *Client, des, nw *ClusterFleet) *ClusterFleet { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterFleet while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.PartialSelfLinkToSelfLink(des.Project, nw.Project) { + nw.Project = des.Project + } + if dcl.StringCanonicalize(des.Membership, nw.Membership) { + nw.Membership = des.Membership + } + + return nw +} + +func canonicalizeNewClusterFleetSet(c *Client, des, nw []ClusterFleet) []ClusterFleet { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterFleet + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterFleetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterFleet(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterFleetSlice(c *Client, des, nw []ClusterFleet) []ClusterFleet { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterFleet + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterFleet(c, &d, &n)) +{{- if ne $.TargetVersionName "ga" }} + } + + return items +} + +func canonicalizeClusterLoggingConfig(des, initial *ClusterLoggingConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterLoggingConfig{} + + cDes.ComponentConfig = canonicalizeClusterLoggingConfigComponentConfig(des.ComponentConfig, initial.ComponentConfig, opts...) 
+ + return cDes +} + +func canonicalizeClusterLoggingConfigSlice(des, initial []ClusterLoggingConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfig(c *Client, des, nw *ClusterLoggingConfig) *ClusterLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ComponentConfig = canonicalizeNewClusterLoggingConfigComponentConfig(c, des.ComponentConfig, nw.ComponentConfig) + + return nw +} + +func canonicalizeNewClusterLoggingConfigSet(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewClusterLoggingConfigSlice(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterLoggingConfigComponentConfig(des, initial *ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfigComponentConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterLoggingConfigComponentConfig{} + + if dcl.IsZeroValue(des.EnableComponents) || (dcl.IsEmptyValueIndirect(des.EnableComponents) && dcl.IsEmptyValueIndirect(initial.EnableComponents)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.EnableComponents = initial.EnableComponents + } else { + cDes.EnableComponents = des.EnableComponents + } + + return cDes +} + +func canonicalizeClusterLoggingConfigComponentConfigSlice(des, initial []ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfigComponentConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfigComponentConfig(c *Client, des, nw *ClusterLoggingConfigComponentConfig) *ClusterLoggingConfigComponentConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfigComponentConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSet(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfigComponentConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigComponentConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSlice(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterLoggingConfigComponentConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterMonitoringConfig(des, initial *ClusterMonitoringConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterMonitoringConfig{} + + cDes.ManagedPrometheusConfig = canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des.ManagedPrometheusConfig, initial.ManagedPrometheusConfig, opts...) + + return cDes +} + +func canonicalizeClusterMonitoringConfigSlice(des, initial []ClusterMonitoringConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterMonitoringConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterMonitoringConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterMonitoringConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterMonitoringConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterMonitoringConfig(c *Client, des, nw *ClusterMonitoringConfig) *ClusterMonitoringConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.ManagedPrometheusConfig = canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, des.ManagedPrometheusConfig, nw.ManagedPrometheusConfig) + + return nw +} + +func canonicalizeNewClusterMonitoringConfigSet(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterMonitoringConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterMonitoringConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterMonitoringConfigSlice(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterMonitoringConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des, initial *ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterMonitoringConfigManagedPrometheusConfig{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + + return cDes +} + +func canonicalizeClusterMonitoringConfigManagedPrometheusConfigSlice(des, initial []ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfigManagedPrometheusConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c *Client, des, nw *ClusterMonitoringConfigManagedPrometheusConfig) *ClusterMonitoringConfigManagedPrometheusConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfigManagedPrometheusConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + + return nw +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSet(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterMonitoringConfigManagedPrometheusConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterMonitoringConfigManagedPrometheusConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &n)) +{{- end }} + } + + return items +} + +func canonicalizeClusterBinaryAuthorization(des, initial *ClusterBinaryAuthorization, opts ...dcl.ApplyOption) *ClusterBinaryAuthorization { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterBinaryAuthorization{} + + if dcl.IsZeroValue(des.EvaluationMode) || (dcl.IsEmptyValueIndirect(des.EvaluationMode) && dcl.IsEmptyValueIndirect(initial.EvaluationMode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.EvaluationMode = initial.EvaluationMode + } else { + cDes.EvaluationMode = des.EvaluationMode + } + + return cDes +} + +func canonicalizeClusterBinaryAuthorizationSlice(des, initial []ClusterBinaryAuthorization, opts ...dcl.ApplyOption) []ClusterBinaryAuthorization { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterBinaryAuthorization, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterBinaryAuthorization(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterBinaryAuthorization, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterBinaryAuthorization(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterBinaryAuthorization(c *Client, des, nw *ClusterBinaryAuthorization) *ClusterBinaryAuthorization { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterBinaryAuthorization while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterBinaryAuthorizationSet(c *Client, des, nw []ClusterBinaryAuthorization) []ClusterBinaryAuthorization { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterBinaryAuthorization + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterBinaryAuthorizationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterBinaryAuthorization(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterBinaryAuthorizationSlice(c *Client, des, nw []ClusterBinaryAuthorization) []ClusterBinaryAuthorization { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterBinaryAuthorization + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterBinaryAuthorization(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. 
Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffCluster(c *Client, desired, actual *Cluster, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Networking, actual.Networking, dcl.DiffInfo{ObjectFunction: compareClusterNetworkingNewStyle, EmptyObject: EmptyClusterNetworking, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Networking")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AwsRegion, actual.AwsRegion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AwsRegion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ControlPlane, actual.ControlPlane, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneNewStyle, EmptyObject: EmptyClusterControlPlane, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ControlPlane")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Authorization, actual.Authorization, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationNewStyle, EmptyObject: EmptyClusterAuthorization, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Authorization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadIdentityConfig, actual.WorkloadIdentityConfig, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterWorkloadIdentityConfigNewStyle, EmptyObject: EmptyClusterWorkloadIdentityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadIdentityConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Fleet, actual.Fleet, dcl.DiffInfo{ObjectFunction: compareClusterFleetNewStyle, EmptyObject: EmptyClusterFleet, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fleet")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + +{{- if ne $.TargetVersionName "ga" }} + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigNewStyle, EmptyObject: EmptyClusterLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MonitoringConfig, actual.MonitoringConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoringConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + +{{- end }} + if ds, err := dcl.Diff(desired.BinaryAuthorization, actual.BinaryAuthorization, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterBinaryAuthorizationNewStyle, EmptyObject: EmptyClusterBinaryAuthorization, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BinaryAuthorization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareClusterNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterNetworking) + if !ok { + desiredNotPointer, ok := d.(ClusterNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterNetworking or *ClusterNetworking", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterNetworking) + if !ok { + actualNotPointer, ok := a.(ClusterNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterNetworking", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.VPCId, actual.VPCId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VpcId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodAddressCidrBlocks, actual.PodAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodAddressCidrBlocks")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAddressCidrBlocks, actual.ServiceAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAddressCidrBlocks")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PerNodePoolSgRulesDisabled, actual.PerNodePoolSgRulesDisabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("PerNodePoolSgRulesDisabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlane) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlane) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlane or *ClusterControlPlane", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlane) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlane) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlane", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceType, actual.InstanceType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("InstanceType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneSshConfigNewStyle, EmptyObject: EmptyClusterControlPlaneSshConfig, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.SubnetIds, actual.SubnetIds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ConfigEncryption, actual.ConfigEncryption, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneConfigEncryptionNewStyle, EmptyObject: EmptyClusterControlPlaneConfigEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigEncryption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecurityGroupIds, actual.SecurityGroupIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SecurityGroupIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IamInstanceProfile, actual.IamInstanceProfile, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("IamInstanceProfile")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneRootVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MainVolume, actual.MainVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneMainVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneMainVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DatabaseEncryption, actual.DatabaseEncryption, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneDatabaseEncryptionNewStyle, EmptyObject: EmptyClusterControlPlaneDatabaseEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DatabaseEncryption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AwsServicesAuthentication, actual.AwsServicesAuthentication, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneAwsServicesAuthenticationNewStyle, EmptyObject: EmptyClusterControlPlaneAwsServicesAuthentication, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AwsServicesAuthentication")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneProxyConfigNewStyle, EmptyObject: EmptyClusterControlPlaneProxyConfig, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } +{{- if ne $.TargetVersionName "ga" }} + + if ds, err := dcl.Diff(desired.InstancePlacement, actual.InstancePlacement, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneInstancePlacementNewStyle, EmptyObject: EmptyClusterControlPlaneInstancePlacement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstancePlacement")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } +{{- end }} + return diffs, nil +} + +func compareClusterControlPlaneSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneSshConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig or *ClusterControlPlaneSshConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneSshConfig) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Ec2KeyPair, actual.Ec2KeyPair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Ec2KeyPair")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneConfigEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneConfigEncryption) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneConfigEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneConfigEncryption or *ClusterControlPlaneConfigEncryption", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneConfigEncryption) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneConfigEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneConfigEncryption", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneRootVolume) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume or *ClusterControlPlaneRootVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneRootVolume) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.VolumeType, actual.VolumeType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("VolumeType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Iops, actual.Iops, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Iops")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneMainVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneMainVolume) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneMainVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume or *ClusterControlPlaneMainVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneMainVolume) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneMainVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.VolumeType, actual.VolumeType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VolumeType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Iops, actual.Iops, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Iops")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneDatabaseEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneDatabaseEncryption) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneDatabaseEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption or *ClusterControlPlaneDatabaseEncryption", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneDatabaseEncryption) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneDatabaseEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneAwsServicesAuthenticationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneAwsServicesAuthentication) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneAwsServicesAuthentication) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneAwsServicesAuthentication or *ClusterControlPlaneAwsServicesAuthentication", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneAwsServicesAuthentication) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneAwsServicesAuthentication) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneAwsServicesAuthentication", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.RoleArn, actual.RoleArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("RoleArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RoleSessionName, actual.RoleSessionName, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("RoleSessionName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneProxyConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig or *ClusterControlPlaneProxyConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneProxyConfig) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SecretArn, actual.SecretArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SecretArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecretVersion, actual.SecretVersion, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SecretVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +{{- if ne $.TargetVersionName "ga" }} +func compareClusterControlPlaneInstancePlacementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneInstancePlacement) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneInstancePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneInstancePlacement or *ClusterControlPlaneInstancePlacement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneInstancePlacement) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneInstancePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneInstancePlacement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Tenancy, actual.Tenancy, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tenancy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +{{- end }} +func compareClusterAuthorizationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorization) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorization or *ClusterAuthorization", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorization) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorization", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AdminUsers, actual.AdminUsers, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminUsersNewStyle, EmptyObject: EmptyClusterAuthorizationAdminUsers, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("AdminUsers")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AdminGroups, actual.AdminGroups, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminGroupsNewStyle, EmptyObject: EmptyClusterAuthorizationAdminGroups, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("AdminGroups")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterAuthorizationAdminUsersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminUsers) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers or *ClusterAuthorizationAdminUsers", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminUsers) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Username")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterAuthorizationAdminGroupsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminGroups) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups or *ClusterAuthorizationAdminGroups", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminGroups) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Group, actual.Group, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Group")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterWorkloadIdentityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterWorkloadIdentityConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig or *ClusterWorkloadIdentityConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterWorkloadIdentityConfig) + if !ok { + actualNotPointer, ok := a.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.IssuerUri, actual.IssuerUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IssuerUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadPool, actual.WorkloadPool, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadPool")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IdentityProvider, actual.IdentityProvider, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityProvider")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterFleetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterFleet) + if !ok { + desiredNotPointer, ok := d.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet or *ClusterFleet", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterFleet) + if !ok { + actualNotPointer, ok := a.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig or *ClusterLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ComponentConfig, actual.ComponentConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigComponentConfigNewStyle, EmptyObject: EmptyClusterLoggingConfigComponentConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComponentConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigComponentConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfigComponentConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig or *ClusterLoggingConfigComponentConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfigComponentConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableComponents, actual.EnableComponents, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("EnableComponents")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig or *ClusterMonitoringConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ManagedPrometheusConfig, actual.ManagedPrometheusConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigManagedPrometheusConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfigManagedPrometheusConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedPrometheusConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig or *ClusterMonitoringConfigManagedPrometheusConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterBinaryAuthorizationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterBinaryAuthorization) + if !ok { + desiredNotPointer, ok := d.(ClusterBinaryAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterBinaryAuthorization or *ClusterBinaryAuthorization", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterBinaryAuthorization) + if !ok { + actualNotPointer, ok := a.(ClusterBinaryAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterBinaryAuthorization", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EvaluationMode, actual.EvaluationMode, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("EvaluationMode")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Cluster) urlNormalized() *Cluster { + normalized := dcl.Copy(*r).(Cluster) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.AwsRegion = dcl.SelfLinkToName(r.AwsRegion) + normalized.Endpoint = dcl.SelfLinkToName(r.Endpoint) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Cluster) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAwsCluster" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Cluster resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Cluster) marshal(c *Client) ([]byte, error) { + m, err := expandCluster(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Cluster: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalCluster decodes JSON responses into the Cluster resource schema. 
+func unmarshalCluster(b []byte, c *Client, res *Cluster) (*Cluster, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapCluster(m, c, res) +} + +func unmarshalMapCluster(m map[string]interface{}, c *Client, res *Cluster) (*Cluster, error) { + + flattened := flattenCluster(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandCluster expands Cluster into a JSON request object. +func expandCluster(c *Client, f *Cluster) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/awsClusters/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v, err := expandClusterNetworking(c, f.Networking, res); err != nil { + return nil, fmt.Errorf("error expanding Networking into networking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["networking"] = v + } + if v := f.AwsRegion; dcl.ValueShouldBeSent(v) { + m["awsRegion"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["authorization"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, 
fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := expandClusterFleet(c, f.Fleet, res); err != nil { + return nil, fmt.Errorf("error expanding Fleet into fleet: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["fleet"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["monitoringConfig"] = v +{{- end }} + } + if v, err := expandClusterBinaryAuthorization(c, f.BinaryAuthorization, res); err != nil { + return nil, fmt.Errorf("error expanding BinaryAuthorization into binaryAuthorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["binaryAuthorization"] = v + } + + return m, nil +} + +// flattenCluster flattens Cluster from a JSON request object into the +// Cluster type. 
+func flattenCluster(c *Client, i interface{}, res *Cluster) *Cluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Cluster{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.Networking = flattenClusterNetworking(c, m["networking"], res) + resultRes.AwsRegion = dcl.FlattenString(m["awsRegion"]) + resultRes.ControlPlane = flattenClusterControlPlane(c, m["controlPlane"], res) + resultRes.Authorization = flattenClusterAuthorization(c, m["authorization"], res) + resultRes.State = flattenClusterStateEnum(m["state"]) + resultRes.Endpoint = dcl.FlattenString(m["endpoint"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.WorkloadIdentityConfig = flattenClusterWorkloadIdentityConfig(c, m["workloadIdentityConfig"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Fleet = flattenClusterFleet(c, m["fleet"], res) +{{- if ne $.TargetVersionName "ga" }} + resultRes.LoggingConfig = flattenClusterLoggingConfig(c, m["loggingConfig"], res) + resultRes.MonitoringConfig = flattenClusterMonitoringConfig(c, m["monitoringConfig"], res) +{{- end }} + resultRes.BinaryAuthorization = flattenClusterBinaryAuthorization(c, m["binaryAuthorization"], res) + + return resultRes +} + +// expandClusterNetworkingMap expands the contents of ClusterNetworking into a JSON +// request object. 
+func expandClusterNetworkingMap(c *Client, f map[string]ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterNetworkingSlice expands the contents of ClusterNetworking into a JSON +// request object. +func expandClusterNetworkingSlice(c *Client, f []ClusterNetworking, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterNetworkingMap flattens the contents of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworkingMap(c *Client, i interface{}, res *Cluster) map[string]ClusterNetworking { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterNetworking{} + } + + if len(a) == 0 { + return map[string]ClusterNetworking{} + } + + items := make(map[string]ClusterNetworking) + for k, item := range a { + items[k] = *flattenClusterNetworking(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterNetworkingSlice flattens the contents of ClusterNetworking from a JSON +// response object. 
+func flattenClusterNetworkingSlice(c *Client, i interface{}, res *Cluster) []ClusterNetworking { + a, ok := i.([]interface{}) + if !ok { + return []ClusterNetworking{} + } + + if len(a) == 0 { + return []ClusterNetworking{} + } + + items := make([]ClusterNetworking, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterNetworking(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterNetworking expands an instance of ClusterNetworking into a JSON +// request object. +func expandClusterNetworking(c *Client, f *ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.VPCId; !dcl.IsEmptyValueIndirect(v) { + m["vpcId"] = v + } + if v := f.PodAddressCidrBlocks; v != nil { + m["podAddressCidrBlocks"] = v + } + if v := f.ServiceAddressCidrBlocks; v != nil { + m["serviceAddressCidrBlocks"] = v + } + if v := f.PerNodePoolSgRulesDisabled; !dcl.IsEmptyValueIndirect(v) { + m["perNodePoolSgRulesDisabled"] = v + } + + return m, nil +} + +// flattenClusterNetworking flattens an instance of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworking(c *Client, i interface{}, res *Cluster) *ClusterNetworking { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterNetworking{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterNetworking + } + r.VPCId = dcl.FlattenString(m["vpcId"]) + r.PodAddressCidrBlocks = dcl.FlattenStringSlice(m["podAddressCidrBlocks"]) + r.ServiceAddressCidrBlocks = dcl.FlattenStringSlice(m["serviceAddressCidrBlocks"]) + r.PerNodePoolSgRulesDisabled = dcl.FlattenBool(m["perNodePoolSgRulesDisabled"]) + + return r +} + +// expandClusterControlPlaneMap expands the contents of ClusterControlPlane into a JSON +// request object. 
+func expandClusterControlPlaneMap(c *Client, f map[string]ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneSlice expands the contents of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlaneSlice(c *Client, f []ClusterControlPlane, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneMap flattens the contents of ClusterControlPlane from a JSON +// response object. +func flattenClusterControlPlaneMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlane { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlane{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlane{} + } + + items := make(map[string]ClusterControlPlane) + for k, item := range a { + items[k] = *flattenClusterControlPlane(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneSlice flattens the contents of ClusterControlPlane from a JSON +// response object. 
+func flattenClusterControlPlaneSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlane { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlane{} + } + + if len(a) == 0 { + return []ClusterControlPlane{} + } + + items := make([]ClusterControlPlane, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlane(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlane expands an instance of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlane(c *Client, f *ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + m["version"] = v + } + if v := f.InstanceType; !dcl.IsEmptyValueIndirect(v) { + m["instanceType"] = v + } + if v, err := expandClusterControlPlaneSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } + if v := f.SubnetIds; v != nil { + m["subnetIds"] = v + } + if v, err := expandClusterControlPlaneConfigEncryption(c, f.ConfigEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding ConfigEncryption into configEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["configEncryption"] = v + } + if v := f.SecurityGroupIds; v != nil { + m["securityGroupIds"] = v + } + if v := f.IamInstanceProfile; !dcl.IsEmptyValueIndirect(v) { + m["iamInstanceProfile"] = v + } + if v, err := expandClusterControlPlaneRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v, err := expandClusterControlPlaneMainVolume(c, f.MainVolume, res); err != nil { + return nil, fmt.Errorf("error expanding 
MainVolume into mainVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["mainVolume"] = v + } + if v, err := expandClusterControlPlaneDatabaseEncryption(c, f.DatabaseEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding DatabaseEncryption into databaseEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["databaseEncryption"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) { + m["tags"] = v + } + if v, err := expandClusterControlPlaneAwsServicesAuthentication(c, f.AwsServicesAuthentication, res); err != nil { + return nil, fmt.Errorf("error expanding AwsServicesAuthentication into awsServicesAuthentication: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["awsServicesAuthentication"] = v + } + if v, err := expandClusterControlPlaneProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterControlPlaneInstancePlacement(c, f.InstancePlacement, res); err != nil { + return nil, fmt.Errorf("error expanding InstancePlacement into instancePlacement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["instancePlacement"] = v +{{- end }} + } + + return m, nil +} + +// flattenClusterControlPlane flattens an instance of ClusterControlPlane from a JSON +// response object. 
+func flattenClusterControlPlane(c *Client, i interface{}, res *Cluster) *ClusterControlPlane { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlane{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlane + } + r.Version = dcl.FlattenString(m["version"]) + r.InstanceType = dcl.FlattenString(m["instanceType"]) + r.SshConfig = flattenClusterControlPlaneSshConfig(c, m["sshConfig"], res) + r.SubnetIds = dcl.FlattenStringSlice(m["subnetIds"]) + r.ConfigEncryption = flattenClusterControlPlaneConfigEncryption(c, m["configEncryption"], res) + r.SecurityGroupIds = dcl.FlattenStringSlice(m["securityGroupIds"]) + r.IamInstanceProfile = dcl.FlattenString(m["iamInstanceProfile"]) + r.RootVolume = flattenClusterControlPlaneRootVolume(c, m["rootVolume"], res) + r.MainVolume = flattenClusterControlPlaneMainVolume(c, m["mainVolume"], res) + r.DatabaseEncryption = flattenClusterControlPlaneDatabaseEncryption(c, m["databaseEncryption"], res) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.AwsServicesAuthentication = flattenClusterControlPlaneAwsServicesAuthentication(c, m["awsServicesAuthentication"], res) + r.ProxyConfig = flattenClusterControlPlaneProxyConfig(c, m["proxyConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.InstancePlacement = flattenClusterControlPlaneInstancePlacement(c, m["instancePlacement"], res) +{{- end }} + + return r +} + +// expandClusterControlPlaneSshConfigMap expands the contents of ClusterControlPlaneSshConfig into a JSON +// request object. 
+func expandClusterControlPlaneSshConfigMap(c *Client, f map[string]ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneSshConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneSshConfigSlice expands the contents of ClusterControlPlaneSshConfig into a JSON +// request object. +func expandClusterControlPlaneSshConfigSlice(c *Client, f []ClusterControlPlaneSshConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneSshConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneSshConfigMap flattens the contents of ClusterControlPlaneSshConfig from a JSON +// response object. +func flattenClusterControlPlaneSshConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneSshConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneSshConfig{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneSshConfig{} + } + + items := make(map[string]ClusterControlPlaneSshConfig) + for k, item := range a { + items[k] = *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneSshConfigSlice flattens the contents of ClusterControlPlaneSshConfig from a JSON +// response object. 
+func flattenClusterControlPlaneSshConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneSshConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneSshConfig{} + } + + if len(a) == 0 { + return []ClusterControlPlaneSshConfig{} + } + + items := make([]ClusterControlPlaneSshConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneSshConfig expands an instance of ClusterControlPlaneSshConfig into a JSON +// request object. +func expandClusterControlPlaneSshConfig(c *Client, f *ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Ec2KeyPair; !dcl.IsEmptyValueIndirect(v) { + m["ec2KeyPair"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneSshConfig flattens an instance of ClusterControlPlaneSshConfig from a JSON +// response object. +func flattenClusterControlPlaneSshConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneSshConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneSshConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneSshConfig + } + r.Ec2KeyPair = dcl.FlattenString(m["ec2KeyPair"]) + + return r +} + +// expandClusterControlPlaneConfigEncryptionMap expands the contents of ClusterControlPlaneConfigEncryption into a JSON +// request object. 
+func expandClusterControlPlaneConfigEncryptionMap(c *Client, f map[string]ClusterControlPlaneConfigEncryption, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneConfigEncryption(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneConfigEncryptionSlice expands the contents of ClusterControlPlaneConfigEncryption into a JSON +// request object. +func expandClusterControlPlaneConfigEncryptionSlice(c *Client, f []ClusterControlPlaneConfigEncryption, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneConfigEncryption(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneConfigEncryptionMap flattens the contents of ClusterControlPlaneConfigEncryption from a JSON +// response object. +func flattenClusterControlPlaneConfigEncryptionMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneConfigEncryption { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneConfigEncryption{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneConfigEncryption{} + } + + items := make(map[string]ClusterControlPlaneConfigEncryption) + for k, item := range a { + items[k] = *flattenClusterControlPlaneConfigEncryption(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneConfigEncryptionSlice flattens the contents of ClusterControlPlaneConfigEncryption from a JSON +// response object. 
+func flattenClusterControlPlaneConfigEncryptionSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneConfigEncryption { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneConfigEncryption{} + } + + if len(a) == 0 { + return []ClusterControlPlaneConfigEncryption{} + } + + items := make([]ClusterControlPlaneConfigEncryption, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneConfigEncryption(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneConfigEncryption expands an instance of ClusterControlPlaneConfigEncryption into a JSON +// request object. +func expandClusterControlPlaneConfigEncryption(c *Client, f *ClusterControlPlaneConfigEncryption, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneConfigEncryption flattens an instance of ClusterControlPlaneConfigEncryption from a JSON +// response object. +func flattenClusterControlPlaneConfigEncryption(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneConfigEncryption { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneConfigEncryption{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneConfigEncryption + } + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandClusterControlPlaneRootVolumeMap expands the contents of ClusterControlPlaneRootVolume into a JSON +// request object. 
+func expandClusterControlPlaneRootVolumeMap(c *Client, f map[string]ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneRootVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneRootVolumeSlice expands the contents of ClusterControlPlaneRootVolume into a JSON +// request object. +func expandClusterControlPlaneRootVolumeSlice(c *Client, f []ClusterControlPlaneRootVolume, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneRootVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneRootVolumeMap flattens the contents of ClusterControlPlaneRootVolume from a JSON +// response object. +func flattenClusterControlPlaneRootVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneRootVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneRootVolume{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneRootVolume{} + } + + items := make(map[string]ClusterControlPlaneRootVolume) + for k, item := range a { + items[k] = *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneRootVolumeSlice flattens the contents of ClusterControlPlaneRootVolume from a JSON +// response object. 
+func flattenClusterControlPlaneRootVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneRootVolume { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneRootVolume{} + } + + if len(a) == 0 { + return []ClusterControlPlaneRootVolume{} + } + + items := make([]ClusterControlPlaneRootVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneRootVolume expands an instance of ClusterControlPlaneRootVolume into a JSON +// request object. +func expandClusterControlPlaneRootVolume(c *Client, f *ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + if v := f.VolumeType; !dcl.IsEmptyValueIndirect(v) { + m["volumeType"] = v + } + if v := f.Iops; !dcl.IsEmptyValueIndirect(v) { + m["iops"] = v + } + if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) { + m["throughput"] = v + } + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneRootVolume flattens an instance of ClusterControlPlaneRootVolume from a JSON +// response object. 
+func flattenClusterControlPlaneRootVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneRootVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneRootVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneRootVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + r.VolumeType = flattenClusterControlPlaneRootVolumeVolumeTypeEnum(m["volumeType"]) + r.Iops = dcl.FlattenInteger(m["iops"]) + r.Throughput = dcl.FlattenInteger(m["throughput"]) + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandClusterControlPlaneMainVolumeMap expands the contents of ClusterControlPlaneMainVolume into a JSON +// request object. +func expandClusterControlPlaneMainVolumeMap(c *Client, f map[string]ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneMainVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneMainVolumeSlice expands the contents of ClusterControlPlaneMainVolume into a JSON +// request object. +func expandClusterControlPlaneMainVolumeSlice(c *Client, f []ClusterControlPlaneMainVolume, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneMainVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneMainVolumeMap flattens the contents of ClusterControlPlaneMainVolume from a JSON +// response object. 
+func flattenClusterControlPlaneMainVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneMainVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneMainVolume{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneMainVolume{} + } + + items := make(map[string]ClusterControlPlaneMainVolume) + for k, item := range a { + items[k] = *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneMainVolumeSlice flattens the contents of ClusterControlPlaneMainVolume from a JSON +// response object. +func flattenClusterControlPlaneMainVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneMainVolume { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneMainVolume{} + } + + if len(a) == 0 { + return []ClusterControlPlaneMainVolume{} + } + + items := make([]ClusterControlPlaneMainVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneMainVolume expands an instance of ClusterControlPlaneMainVolume into a JSON +// request object. 
+func expandClusterControlPlaneMainVolume(c *Client, f *ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + if v := f.VolumeType; !dcl.IsEmptyValueIndirect(v) { + m["volumeType"] = v + } + if v := f.Iops; !dcl.IsEmptyValueIndirect(v) { + m["iops"] = v + } + if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) { + m["throughput"] = v + } + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneMainVolume flattens an instance of ClusterControlPlaneMainVolume from a JSON +// response object. +func flattenClusterControlPlaneMainVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneMainVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneMainVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneMainVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + r.VolumeType = flattenClusterControlPlaneMainVolumeVolumeTypeEnum(m["volumeType"]) + r.Iops = dcl.FlattenInteger(m["iops"]) + r.Throughput = dcl.FlattenInteger(m["throughput"]) + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandClusterControlPlaneDatabaseEncryptionMap expands the contents of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. 
+func expandClusterControlPlaneDatabaseEncryptionMap(c *Client, f map[string]ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneDatabaseEncryptionSlice expands the contents of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. +func expandClusterControlPlaneDatabaseEncryptionSlice(c *Client, f []ClusterControlPlaneDatabaseEncryption, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneDatabaseEncryptionMap flattens the contents of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. +func flattenClusterControlPlaneDatabaseEncryptionMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneDatabaseEncryption { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneDatabaseEncryption{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneDatabaseEncryption{} + } + + items := make(map[string]ClusterControlPlaneDatabaseEncryption) + for k, item := range a { + items[k] = *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneDatabaseEncryptionSlice flattens the contents of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. 
+func flattenClusterControlPlaneDatabaseEncryptionSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneDatabaseEncryption { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneDatabaseEncryption{} + } + + if len(a) == 0 { + return []ClusterControlPlaneDatabaseEncryption{} + } + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneDatabaseEncryption expands an instance of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. +func expandClusterControlPlaneDatabaseEncryption(c *Client, f *ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneDatabaseEncryption flattens an instance of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. +func flattenClusterControlPlaneDatabaseEncryption(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneDatabaseEncryption { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneDatabaseEncryption{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneDatabaseEncryption + } + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandClusterControlPlaneAwsServicesAuthenticationMap expands the contents of ClusterControlPlaneAwsServicesAuthentication into a JSON +// request object. 
+func expandClusterControlPlaneAwsServicesAuthenticationMap(c *Client, f map[string]ClusterControlPlaneAwsServicesAuthentication, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneAwsServicesAuthentication(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneAwsServicesAuthenticationSlice expands the contents of ClusterControlPlaneAwsServicesAuthentication into a JSON +// request object. +func expandClusterControlPlaneAwsServicesAuthenticationSlice(c *Client, f []ClusterControlPlaneAwsServicesAuthentication, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneAwsServicesAuthentication(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneAwsServicesAuthenticationMap flattens the contents of ClusterControlPlaneAwsServicesAuthentication from a JSON +// response object. 
+func flattenClusterControlPlaneAwsServicesAuthenticationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneAwsServicesAuthentication { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneAwsServicesAuthentication{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneAwsServicesAuthentication{} + } + + items := make(map[string]ClusterControlPlaneAwsServicesAuthentication) + for k, item := range a { + items[k] = *flattenClusterControlPlaneAwsServicesAuthentication(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneAwsServicesAuthenticationSlice flattens the contents of ClusterControlPlaneAwsServicesAuthentication from a JSON +// response object. +func flattenClusterControlPlaneAwsServicesAuthenticationSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneAwsServicesAuthentication { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneAwsServicesAuthentication{} + } + + if len(a) == 0 { + return []ClusterControlPlaneAwsServicesAuthentication{} + } + + items := make([]ClusterControlPlaneAwsServicesAuthentication, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneAwsServicesAuthentication(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneAwsServicesAuthentication expands an instance of ClusterControlPlaneAwsServicesAuthentication into a JSON +// request object. 
+func expandClusterControlPlaneAwsServicesAuthentication(c *Client, f *ClusterControlPlaneAwsServicesAuthentication, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.RoleArn; !dcl.IsEmptyValueIndirect(v) { + m["roleArn"] = v + } + if v := f.RoleSessionName; !dcl.IsEmptyValueIndirect(v) { + m["roleSessionName"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneAwsServicesAuthentication flattens an instance of ClusterControlPlaneAwsServicesAuthentication from a JSON +// response object. +func flattenClusterControlPlaneAwsServicesAuthentication(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneAwsServicesAuthentication { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneAwsServicesAuthentication{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneAwsServicesAuthentication + } + r.RoleArn = dcl.FlattenString(m["roleArn"]) + r.RoleSessionName = dcl.FlattenString(m["roleSessionName"]) + + return r +} + +// expandClusterControlPlaneProxyConfigMap expands the contents of ClusterControlPlaneProxyConfig into a JSON +// request object. +func expandClusterControlPlaneProxyConfigMap(c *Client, f map[string]ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneProxyConfigSlice expands the contents of ClusterControlPlaneProxyConfig into a JSON +// request object. 
+func expandClusterControlPlaneProxyConfigSlice(c *Client, f []ClusterControlPlaneProxyConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneProxyConfigMap flattens the contents of ClusterControlPlaneProxyConfig from a JSON +// response object. +func flattenClusterControlPlaneProxyConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneProxyConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneProxyConfig{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneProxyConfig{} + } + + items := make(map[string]ClusterControlPlaneProxyConfig) + for k, item := range a { + items[k] = *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneProxyConfigSlice flattens the contents of ClusterControlPlaneProxyConfig from a JSON +// response object. +func flattenClusterControlPlaneProxyConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneProxyConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneProxyConfig{} + } + + if len(a) == 0 { + return []ClusterControlPlaneProxyConfig{} + } + + items := make([]ClusterControlPlaneProxyConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneProxyConfig expands an instance of ClusterControlPlaneProxyConfig into a JSON +// request object. 
+func expandClusterControlPlaneProxyConfig(c *Client, f *ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SecretArn; !dcl.IsEmptyValueIndirect(v) { + m["secretArn"] = v + } + if v := f.SecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["secretVersion"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneProxyConfig flattens an instance of ClusterControlPlaneProxyConfig from a JSON +// response object. +func flattenClusterControlPlaneProxyConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneProxyConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneProxyConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneProxyConfig + } + r.SecretArn = dcl.FlattenString(m["secretArn"]) + r.SecretVersion = dcl.FlattenString(m["secretVersion"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandClusterControlPlaneInstancePlacementMap expands the contents of ClusterControlPlaneInstancePlacement into a JSON +// request object. +func expandClusterControlPlaneInstancePlacementMap(c *Client, f map[string]ClusterControlPlaneInstancePlacement, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneInstancePlacement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneInstancePlacementSlice expands the contents of ClusterControlPlaneInstancePlacement into a JSON +// request object. 
+func expandClusterControlPlaneInstancePlacementSlice(c *Client, f []ClusterControlPlaneInstancePlacement, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneInstancePlacement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneInstancePlacementMap flattens the contents of ClusterControlPlaneInstancePlacement from a JSON +// response object. +func flattenClusterControlPlaneInstancePlacementMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneInstancePlacement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneInstancePlacement{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneInstancePlacement{} + } + + items := make(map[string]ClusterControlPlaneInstancePlacement) + for k, item := range a { + items[k] = *flattenClusterControlPlaneInstancePlacement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneInstancePlacementSlice flattens the contents of ClusterControlPlaneInstancePlacement from a JSON +// response object. +func flattenClusterControlPlaneInstancePlacementSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneInstancePlacement { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneInstancePlacement{} + } + + if len(a) == 0 { + return []ClusterControlPlaneInstancePlacement{} + } + + items := make([]ClusterControlPlaneInstancePlacement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneInstancePlacement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneInstancePlacement expands an instance of ClusterControlPlaneInstancePlacement into a JSON +// request object. 
+func expandClusterControlPlaneInstancePlacement(c *Client, f *ClusterControlPlaneInstancePlacement, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Tenancy; !dcl.IsEmptyValueIndirect(v) { + m["tenancy"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneInstancePlacement flattens an instance of ClusterControlPlaneInstancePlacement from a JSON +// response object. +func flattenClusterControlPlaneInstancePlacement(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneInstancePlacement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneInstancePlacement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneInstancePlacement + } + r.Tenancy = flattenClusterControlPlaneInstancePlacementTenancyEnum(m["tenancy"]) + + return r +} + +{{- end }} +// expandClusterAuthorizationMap expands the contents of ClusterAuthorization into a JSON +// request object. +func expandClusterAuthorizationMap(c *Client, f map[string]ClusterAuthorization, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorization(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationSlice expands the contents of ClusterAuthorization into a JSON +// request object. 
+func expandClusterAuthorizationSlice(c *Client, f []ClusterAuthorization, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorization(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationMap flattens the contents of ClusterAuthorization from a JSON +// response object. +func flattenClusterAuthorizationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorization { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorization{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorization{} + } + + items := make(map[string]ClusterAuthorization) + for k, item := range a { + items[k] = *flattenClusterAuthorization(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationSlice flattens the contents of ClusterAuthorization from a JSON +// response object. +func flattenClusterAuthorizationSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorization { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorization{} + } + + if len(a) == 0 { + return []ClusterAuthorization{} + } + + items := make([]ClusterAuthorization, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorization(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorization expands an instance of ClusterAuthorization into a JSON +// request object. 
+func expandClusterAuthorization(c *Client, f *ClusterAuthorization, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterAuthorizationAdminUsersSlice(c, f.AdminUsers, res); err != nil { + return nil, fmt.Errorf("error expanding AdminUsers into adminUsers: %w", err) + } else if v != nil { + m["adminUsers"] = v + } + if v, err := expandClusterAuthorizationAdminGroupsSlice(c, f.AdminGroups, res); err != nil { + return nil, fmt.Errorf("error expanding AdminGroups into adminGroups: %w", err) + } else if v != nil { + m["adminGroups"] = v + } + + return m, nil +} + +// flattenClusterAuthorization flattens an instance of ClusterAuthorization from a JSON +// response object. +func flattenClusterAuthorization(c *Client, i interface{}, res *Cluster) *ClusterAuthorization { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorization{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorization + } + r.AdminUsers = flattenClusterAuthorizationAdminUsersSlice(c, m["adminUsers"], res) + r.AdminGroups = flattenClusterAuthorizationAdminGroupsSlice(c, m["adminGroups"], res) + + return r +} + +// expandClusterAuthorizationAdminUsersMap expands the contents of ClusterAuthorizationAdminUsers into a JSON +// request object. +func expandClusterAuthorizationAdminUsersMap(c *Client, f map[string]ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorizationAdminUsers(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationAdminUsersSlice expands the contents of ClusterAuthorizationAdminUsers into a JSON +// request object. 
+func expandClusterAuthorizationAdminUsersSlice(c *Client, f []ClusterAuthorizationAdminUsers, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorizationAdminUsers(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationAdminUsersMap flattens the contents of ClusterAuthorizationAdminUsers from a JSON +// response object. +func flattenClusterAuthorizationAdminUsersMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminUsers { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorizationAdminUsers{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorizationAdminUsers{} + } + + items := make(map[string]ClusterAuthorizationAdminUsers) + for k, item := range a { + items[k] = *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationAdminUsersSlice flattens the contents of ClusterAuthorizationAdminUsers from a JSON +// response object. +func flattenClusterAuthorizationAdminUsersSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminUsers { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorizationAdminUsers{} + } + + if len(a) == 0 { + return []ClusterAuthorizationAdminUsers{} + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorizationAdminUsers expands an instance of ClusterAuthorizationAdminUsers into a JSON +// request object. 
+func expandClusterAuthorizationAdminUsers(c *Client, f *ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Username; !dcl.IsEmptyValueIndirect(v) { + m["username"] = v + } + + return m, nil +} + +// flattenClusterAuthorizationAdminUsers flattens an instance of ClusterAuthorizationAdminUsers from a JSON +// response object. +func flattenClusterAuthorizationAdminUsers(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminUsers { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorizationAdminUsers{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorizationAdminUsers + } + r.Username = dcl.FlattenString(m["username"]) + + return r +} + +// expandClusterAuthorizationAdminGroupsMap expands the contents of ClusterAuthorizationAdminGroups into a JSON +// request object. +func expandClusterAuthorizationAdminGroupsMap(c *Client, f map[string]ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorizationAdminGroups(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationAdminGroupsSlice expands the contents of ClusterAuthorizationAdminGroups into a JSON +// request object. 
+func expandClusterAuthorizationAdminGroupsSlice(c *Client, f []ClusterAuthorizationAdminGroups, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorizationAdminGroups(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationAdminGroupsMap flattens the contents of ClusterAuthorizationAdminGroups from a JSON +// response object. +func flattenClusterAuthorizationAdminGroupsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminGroups { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorizationAdminGroups{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorizationAdminGroups{} + } + + items := make(map[string]ClusterAuthorizationAdminGroups) + for k, item := range a { + items[k] = *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationAdminGroupsSlice flattens the contents of ClusterAuthorizationAdminGroups from a JSON +// response object. +func flattenClusterAuthorizationAdminGroupsSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminGroups { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorizationAdminGroups{} + } + + if len(a) == 0 { + return []ClusterAuthorizationAdminGroups{} + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorizationAdminGroups expands an instance of ClusterAuthorizationAdminGroups into a JSON +// request object. 
+func expandClusterAuthorizationAdminGroups(c *Client, f *ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Group; !dcl.IsEmptyValueIndirect(v) { + m["group"] = v + } + + return m, nil +} + +// flattenClusterAuthorizationAdminGroups flattens an instance of ClusterAuthorizationAdminGroups from a JSON +// response object. +func flattenClusterAuthorizationAdminGroups(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminGroups { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorizationAdminGroups{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorizationAdminGroups + } + r.Group = dcl.FlattenString(m["group"]) + + return r +} + +// expandClusterWorkloadIdentityConfigMap expands the contents of ClusterWorkloadIdentityConfig into a JSON +// request object. +func expandClusterWorkloadIdentityConfigMap(c *Client, f map[string]ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterWorkloadIdentityConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterWorkloadIdentityConfigSlice expands the contents of ClusterWorkloadIdentityConfig into a JSON +// request object. 
+func expandClusterWorkloadIdentityConfigSlice(c *Client, f []ClusterWorkloadIdentityConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterWorkloadIdentityConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterWorkloadIdentityConfigMap flattens the contents of ClusterWorkloadIdentityConfig from a JSON +// response object. +func flattenClusterWorkloadIdentityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterWorkloadIdentityConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterWorkloadIdentityConfig{} + } + + if len(a) == 0 { + return map[string]ClusterWorkloadIdentityConfig{} + } + + items := make(map[string]ClusterWorkloadIdentityConfig) + for k, item := range a { + items[k] = *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterWorkloadIdentityConfigSlice flattens the contents of ClusterWorkloadIdentityConfig from a JSON +// response object. +func flattenClusterWorkloadIdentityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterWorkloadIdentityConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterWorkloadIdentityConfig{} + } + + if len(a) == 0 { + return []ClusterWorkloadIdentityConfig{} + } + + items := make([]ClusterWorkloadIdentityConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterWorkloadIdentityConfig expands an instance of ClusterWorkloadIdentityConfig into a JSON +// request object. 
+func expandClusterWorkloadIdentityConfig(c *Client, f *ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.IssuerUri; !dcl.IsEmptyValueIndirect(v) { + m["issuerUri"] = v + } + if v := f.WorkloadPool; !dcl.IsEmptyValueIndirect(v) { + m["workloadPool"] = v + } + if v := f.IdentityProvider; !dcl.IsEmptyValueIndirect(v) { + m["identityProvider"] = v + } + + return m, nil +} + +// flattenClusterWorkloadIdentityConfig flattens an instance of ClusterWorkloadIdentityConfig from a JSON +// response object. +func flattenClusterWorkloadIdentityConfig(c *Client, i interface{}, res *Cluster) *ClusterWorkloadIdentityConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterWorkloadIdentityConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterWorkloadIdentityConfig + } + r.IssuerUri = dcl.FlattenString(m["issuerUri"]) + r.WorkloadPool = dcl.FlattenString(m["workloadPool"]) + r.IdentityProvider = dcl.FlattenString(m["identityProvider"]) + + return r +} + +// expandClusterFleetMap expands the contents of ClusterFleet into a JSON +// request object. +func expandClusterFleetMap(c *Client, f map[string]ClusterFleet, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterFleet(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterFleetSlice expands the contents of ClusterFleet into a JSON +// request object. 
+func expandClusterFleetSlice(c *Client, f []ClusterFleet, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterFleet(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterFleetMap flattens the contents of ClusterFleet from a JSON +// response object. +func flattenClusterFleetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterFleet { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterFleet{} + } + + if len(a) == 0 { + return map[string]ClusterFleet{} + } + + items := make(map[string]ClusterFleet) + for k, item := range a { + items[k] = *flattenClusterFleet(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterFleetSlice flattens the contents of ClusterFleet from a JSON +// response object. +func flattenClusterFleetSlice(c *Client, i interface{}, res *Cluster) []ClusterFleet { + a, ok := i.([]interface{}) + if !ok { + return []ClusterFleet{} + } + + if len(a) == 0 { + return []ClusterFleet{} + } + + items := make([]ClusterFleet, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterFleet(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterFleet expands an instance of ClusterFleet into a JSON +// request object. 
+func expandClusterFleet(c *Client, f *ClusterFleet, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := dcl.DeriveField("projects/%s", f.Project, dcl.SelfLinkToName(f.Project)); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenClusterFleet flattens an instance of ClusterFleet from a JSON +// response object. +func flattenClusterFleet(c *Client, i interface{}, res *Cluster) *ClusterFleet { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterFleet{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterFleet + } + r.Project = dcl.FlattenString(m["project"]) + r.Membership = dcl.FlattenString(m["membership"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandClusterLoggingConfigMap expands the contents of ClusterLoggingConfig into a JSON +// request object. +func expandClusterLoggingConfigMap(c *Client, f map[string]ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterLoggingConfigSlice expands the contents of ClusterLoggingConfig into a JSON +// request object. 
+func expandClusterLoggingConfigSlice(c *Client, f []ClusterLoggingConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterLoggingConfigMap flattens the contents of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfig{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfig{} + } + + items := make(map[string]ClusterLoggingConfig) + for k, item := range a { + items[k] = *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterLoggingConfigSlice flattens the contents of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfig{} + } + + if len(a) == 0 { + return []ClusterLoggingConfig{} + } + + items := make([]ClusterLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterLoggingConfig expands an instance of ClusterLoggingConfig into a JSON +// request object. 
+func expandClusterLoggingConfig(c *Client, f *ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterLoggingConfigComponentConfig(c, f.ComponentConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ComponentConfig into componentConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["componentConfig"] = v + } + + return m, nil +} + +// flattenClusterLoggingConfig flattens an instance of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterLoggingConfig + } + r.ComponentConfig = flattenClusterLoggingConfigComponentConfig(c, m["componentConfig"], res) + + return r +} + +// expandClusterLoggingConfigComponentConfigMap expands the contents of ClusterLoggingConfigComponentConfig into a JSON +// request object. +func expandClusterLoggingConfigComponentConfigMap(c *Client, f map[string]ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterLoggingConfigComponentConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterLoggingConfigComponentConfigSlice expands the contents of ClusterLoggingConfigComponentConfig into a JSON +// request object. 
+func expandClusterLoggingConfigComponentConfigSlice(c *Client, f []ClusterLoggingConfigComponentConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterLoggingConfigComponentConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterLoggingConfigComponentConfigMap flattens the contents of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfigComponentConfig{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfigComponentConfig{} + } + + items := make(map[string]ClusterLoggingConfigComponentConfig) + for k, item := range a { + items[k] = *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigSlice flattens the contents of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfigComponentConfig{} + } + + if len(a) == 0 { + return []ClusterLoggingConfigComponentConfig{} + } + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterLoggingConfigComponentConfig expands an instance of ClusterLoggingConfigComponentConfig into a JSON +// request object. 
+func expandClusterLoggingConfigComponentConfig(c *Client, f *ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableComponents; v != nil { + m["enableComponents"] = v + } + + return m, nil +} + +// flattenClusterLoggingConfigComponentConfig flattens an instance of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfigComponentConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterLoggingConfigComponentConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterLoggingConfigComponentConfig + } + r.EnableComponents = flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c, m["enableComponents"], res) + + return r +} + +// expandClusterMonitoringConfigMap expands the contents of ClusterMonitoringConfig into a JSON +// request object. +func expandClusterMonitoringConfigMap(c *Client, f map[string]ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterMonitoringConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterMonitoringConfigSlice expands the contents of ClusterMonitoringConfig into a JSON +// request object. 
+func expandClusterMonitoringConfigSlice(c *Client, f []ClusterMonitoringConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterMonitoringConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterMonitoringConfigMap flattens the contents of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterMonitoringConfig{} + } + + if len(a) == 0 { + return map[string]ClusterMonitoringConfig{} + } + + items := make(map[string]ClusterMonitoringConfig) + for k, item := range a { + items[k] = *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterMonitoringConfigSlice flattens the contents of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterMonitoringConfig{} + } + + if len(a) == 0 { + return []ClusterMonitoringConfig{} + } + + items := make([]ClusterMonitoringConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterMonitoringConfig expands an instance of ClusterMonitoringConfig into a JSON +// request object. 
+func expandClusterMonitoringConfig(c *Client, f *ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, f.ManagedPrometheusConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ManagedPrometheusConfig into managedPrometheusConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["managedPrometheusConfig"] = v + } + + return m, nil +} + +// flattenClusterMonitoringConfig flattens an instance of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterMonitoringConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterMonitoringConfig + } + r.ManagedPrometheusConfig = flattenClusterMonitoringConfigManagedPrometheusConfig(c, m["managedPrometheusConfig"], res) + + return r +} + +// expandClusterMonitoringConfigManagedPrometheusConfigMap expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. +func expandClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, f map[string]ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterMonitoringConfigManagedPrometheusConfigSlice expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. 
+func expandClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, f []ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterMonitoringConfigManagedPrometheusConfigMap flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. +func flattenClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfigManagedPrometheusConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterMonitoringConfigManagedPrometheusConfig{} + } + + if len(a) == 0 { + return map[string]ClusterMonitoringConfigManagedPrometheusConfig{} + } + + items := make(map[string]ClusterMonitoringConfigManagedPrometheusConfig) + for k, item := range a { + items[k] = *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterMonitoringConfigManagedPrometheusConfigSlice flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. 
+func flattenClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfigManagedPrometheusConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterMonitoringConfigManagedPrometheusConfig{} + } + + if len(a) == 0 { + return []ClusterMonitoringConfigManagedPrometheusConfig{} + } + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterMonitoringConfigManagedPrometheusConfig expands an instance of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. +func expandClusterMonitoringConfigManagedPrometheusConfig(c *Client, f *ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + + return m, nil +} + +// flattenClusterMonitoringConfigManagedPrometheusConfig flattens an instance of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. +func flattenClusterMonitoringConfigManagedPrometheusConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfigManagedPrometheusConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterMonitoringConfigManagedPrometheusConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterMonitoringConfigManagedPrometheusConfig + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + + return r +} + +{{- end }} +// expandClusterBinaryAuthorizationMap expands the contents of ClusterBinaryAuthorization into a JSON +// request object. 
+func expandClusterBinaryAuthorizationMap(c *Client, f map[string]ClusterBinaryAuthorization, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterBinaryAuthorization(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterBinaryAuthorizationSlice expands the contents of ClusterBinaryAuthorization into a JSON +// request object. +func expandClusterBinaryAuthorizationSlice(c *Client, f []ClusterBinaryAuthorization, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterBinaryAuthorization(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterBinaryAuthorizationMap flattens the contents of ClusterBinaryAuthorization from a JSON +// response object. +func flattenClusterBinaryAuthorizationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterBinaryAuthorization { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterBinaryAuthorization{} + } + + if len(a) == 0 { + return map[string]ClusterBinaryAuthorization{} + } + + items := make(map[string]ClusterBinaryAuthorization) + for k, item := range a { + items[k] = *flattenClusterBinaryAuthorization(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterBinaryAuthorizationSlice flattens the contents of ClusterBinaryAuthorization from a JSON +// response object. 
+func flattenClusterBinaryAuthorizationSlice(c *Client, i interface{}, res *Cluster) []ClusterBinaryAuthorization { + a, ok := i.([]interface{}) + if !ok { + return []ClusterBinaryAuthorization{} + } + + if len(a) == 0 { + return []ClusterBinaryAuthorization{} + } + + items := make([]ClusterBinaryAuthorization, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterBinaryAuthorization(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterBinaryAuthorization expands an instance of ClusterBinaryAuthorization into a JSON +// request object. +func expandClusterBinaryAuthorization(c *Client, f *ClusterBinaryAuthorization, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EvaluationMode; !dcl.IsEmptyValueIndirect(v) { + m["evaluationMode"] = v + } + + return m, nil +} + +// flattenClusterBinaryAuthorization flattens an instance of ClusterBinaryAuthorization from a JSON +// response object. +func flattenClusterBinaryAuthorization(c *Client, i interface{}, res *Cluster) *ClusterBinaryAuthorization { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterBinaryAuthorization{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterBinaryAuthorization + } + r.EvaluationMode = flattenClusterBinaryAuthorizationEvaluationModeEnum(m["evaluationMode"]) + + return r +} + +// flattenClusterControlPlaneRootVolumeVolumeTypeEnumMap flattens the contents of ClusterControlPlaneRootVolumeVolumeTypeEnum from a JSON +// response object. 
+func flattenClusterControlPlaneRootVolumeVolumeTypeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum{} + } + + items := make(map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum) + for k, item := range a { + items[k] = *flattenClusterControlPlaneRootVolumeVolumeTypeEnum(item.(interface{})) + } + + return items +} + +// flattenClusterControlPlaneRootVolumeVolumeTypeEnumSlice flattens the contents of ClusterControlPlaneRootVolumeVolumeTypeEnum from a JSON +// response object. +func flattenClusterControlPlaneRootVolumeVolumeTypeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneRootVolumeVolumeTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneRootVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return []ClusterControlPlaneRootVolumeVolumeTypeEnum{} + } + + items := make([]ClusterControlPlaneRootVolumeVolumeTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneRootVolumeVolumeTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterControlPlaneRootVolumeVolumeTypeEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterControlPlaneRootVolumeVolumeTypeEnum with the same value as that string. +func flattenClusterControlPlaneRootVolumeVolumeTypeEnum(i interface{}) *ClusterControlPlaneRootVolumeVolumeTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterControlPlaneRootVolumeVolumeTypeEnumRef(s) +} + +// flattenClusterControlPlaneMainVolumeVolumeTypeEnumMap flattens the contents of ClusterControlPlaneMainVolumeVolumeTypeEnum from a JSON +// response object. 
+func flattenClusterControlPlaneMainVolumeVolumeTypeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum{} + } + + items := make(map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum) + for k, item := range a { + items[k] = *flattenClusterControlPlaneMainVolumeVolumeTypeEnum(item.(interface{})) + } + + return items +} + +// flattenClusterControlPlaneMainVolumeVolumeTypeEnumSlice flattens the contents of ClusterControlPlaneMainVolumeVolumeTypeEnum from a JSON +// response object. +func flattenClusterControlPlaneMainVolumeVolumeTypeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneMainVolumeVolumeTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneMainVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return []ClusterControlPlaneMainVolumeVolumeTypeEnum{} + } + + items := make([]ClusterControlPlaneMainVolumeVolumeTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneMainVolumeVolumeTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterControlPlaneMainVolumeVolumeTypeEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterControlPlaneMainVolumeVolumeTypeEnum with the same value as that string. +func flattenClusterControlPlaneMainVolumeVolumeTypeEnum(i interface{}) *ClusterControlPlaneMainVolumeVolumeTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterControlPlaneMainVolumeVolumeTypeEnumRef(s) +} + +{{- if ne $.TargetVersionName "ga" }} +// flattenClusterControlPlaneInstancePlacementTenancyEnumMap flattens the contents of ClusterControlPlaneInstancePlacementTenancyEnum from a JSON +// response object. 
+func flattenClusterControlPlaneInstancePlacementTenancyEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneInstancePlacementTenancyEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneInstancePlacementTenancyEnum{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneInstancePlacementTenancyEnum{} + } + + items := make(map[string]ClusterControlPlaneInstancePlacementTenancyEnum) + for k, item := range a { + items[k] = *flattenClusterControlPlaneInstancePlacementTenancyEnum(item.(interface{})) + } + + return items +} + +// flattenClusterControlPlaneInstancePlacementTenancyEnumSlice flattens the contents of ClusterControlPlaneInstancePlacementTenancyEnum from a JSON +// response object. +func flattenClusterControlPlaneInstancePlacementTenancyEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneInstancePlacementTenancyEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneInstancePlacementTenancyEnum{} + } + + if len(a) == 0 { + return []ClusterControlPlaneInstancePlacementTenancyEnum{} + } + + items := make([]ClusterControlPlaneInstancePlacementTenancyEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneInstancePlacementTenancyEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterControlPlaneInstancePlacementTenancyEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterControlPlaneInstancePlacementTenancyEnum with the same value as that string. +func flattenClusterControlPlaneInstancePlacementTenancyEnum(i interface{}) *ClusterControlPlaneInstancePlacementTenancyEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterControlPlaneInstancePlacementTenancyEnumRef(s) +} + +{{- end }} +// flattenClusterStateEnumMap flattens the contents of ClusterStateEnum from a JSON +// response object. 
+func flattenClusterStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterStateEnum{} + } + + if len(a) == 0 { + return map[string]ClusterStateEnum{} + } + + items := make(map[string]ClusterStateEnum) + for k, item := range a { + items[k] = *flattenClusterStateEnum(item.(interface{})) + } + + return items +} + +// flattenClusterStateEnumSlice flattens the contents of ClusterStateEnum from a JSON +// response object. +func flattenClusterStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterStateEnum{} + } + + if len(a) == 0 { + return []ClusterStateEnum{} + } + + items := make([]ClusterStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterStateEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterStateEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterStateEnum with the same value as that string. +func flattenClusterStateEnum(i interface{}) *ClusterStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterStateEnumRef(s) +{{- if ne $.TargetVersionName "ga" }} +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON +// response object. 
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + items := make(map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum) + for k, item := range a { + items[k] = *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{})) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfigEnableComponentsEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + if len(a) == 0 { + return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + items := make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the same value as that string. 
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(i interface{}) *ClusterLoggingConfigComponentConfigEnableComponentsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s) +{{- end }} +} + +// flattenClusterBinaryAuthorizationEvaluationModeEnumMap flattens the contents of ClusterBinaryAuthorizationEvaluationModeEnum from a JSON +// response object. +func flattenClusterBinaryAuthorizationEvaluationModeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterBinaryAuthorizationEvaluationModeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + if len(a) == 0 { + return map[string]ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + items := make(map[string]ClusterBinaryAuthorizationEvaluationModeEnum) + for k, item := range a { + items[k] = *flattenClusterBinaryAuthorizationEvaluationModeEnum(item.(interface{})) + } + + return items +} + +// flattenClusterBinaryAuthorizationEvaluationModeEnumSlice flattens the contents of ClusterBinaryAuthorizationEvaluationModeEnum from a JSON +// response object. +func flattenClusterBinaryAuthorizationEvaluationModeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterBinaryAuthorizationEvaluationModeEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + if len(a) == 0 { + return []ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + items := make([]ClusterBinaryAuthorizationEvaluationModeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterBinaryAuthorizationEvaluationModeEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterBinaryAuthorizationEvaluationModeEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterBinaryAuthorizationEvaluationModeEnum with the same value as that string. 
+func flattenClusterBinaryAuthorizationEvaluationModeEnum(i interface{}) *ClusterBinaryAuthorizationEvaluationModeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterBinaryAuthorizationEvaluationModeEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Cluster) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalCluster(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type clusterDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp clusterApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToClusterDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clusterDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []clusterDiff + // For each operation name, create a clusterDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := clusterDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToClusterApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToClusterApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clusterApiOperation, error) { + switch opName { + + case "updateClusterUpdateAwsClusterOperation": + return &updateClusterUpdateAwsClusterOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractClusterFields(r *Cluster) error { + vNetworking := r.Networking + if vNetworking == nil { + // note: explicitly not the empty object. 
+ vNetworking = &ClusterNetworking{} + } + if err := extractClusterNetworkingFields(r, vNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworking) { + r.Networking = vNetworking + } + vControlPlane := r.ControlPlane + if vControlPlane == nil { + // note: explicitly not the empty object. + vControlPlane = &ClusterControlPlane{} + } + if err := extractClusterControlPlaneFields(r, vControlPlane); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vControlPlane) { + r.ControlPlane = vControlPlane + } + vAuthorization := r.Authorization + if vAuthorization == nil { + // note: explicitly not the empty object. + vAuthorization = &ClusterAuthorization{} + } + if err := extractClusterAuthorizationFields(r, vAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorization) { + r.Authorization = vAuthorization + } + vWorkloadIdentityConfig := r.WorkloadIdentityConfig + if vWorkloadIdentityConfig == nil { + // note: explicitly not the empty object. + vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{} + } + if err := extractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) { + r.WorkloadIdentityConfig = vWorkloadIdentityConfig + } + vFleet := r.Fleet + if vFleet == nil { + // note: explicitly not the empty object. + vFleet = &ClusterFleet{} + } + if err := extractClusterFleetFields(r, vFleet); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleet) { + r.Fleet = vFleet + } +{{- if ne $.TargetVersionName "ga" }} + vLoggingConfig := r.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &ClusterLoggingConfig{} + } + if err := extractClusterLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + r.LoggingConfig = vLoggingConfig + } + vMonitoringConfig := r.MonitoringConfig + if vMonitoringConfig == nil { + // note: explicitly not the empty object. + vMonitoringConfig = &ClusterMonitoringConfig{} + } + if err := extractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoringConfig) { + r.MonitoringConfig = vMonitoringConfig + } +{{- end }} + vBinaryAuthorization := r.BinaryAuthorization + if vBinaryAuthorization == nil { + // note: explicitly not the empty object. + vBinaryAuthorization = &ClusterBinaryAuthorization{} + } + if err := extractClusterBinaryAuthorizationFields(r, vBinaryAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vBinaryAuthorization) { + r.BinaryAuthorization = vBinaryAuthorization + } + return nil +} +func extractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error { + return nil +} +func extractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &ClusterControlPlaneSshConfig{} + } + if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. 
+ vConfigEncryption = &ClusterControlPlaneConfigEncryption{} + } + if err := extractClusterControlPlaneConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &ClusterControlPlaneRootVolume{} + } + if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vMainVolume := o.MainVolume + if vMainVolume == nil { + // note: explicitly not the empty object. + vMainVolume = &ClusterControlPlaneMainVolume{} + } + if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMainVolume) { + o.MainVolume = vMainVolume + } + vDatabaseEncryption := o.DatabaseEncryption + if vDatabaseEncryption == nil { + // note: explicitly not the empty object. + vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{} + } + if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) { + o.DatabaseEncryption = vDatabaseEncryption + } + vAwsServicesAuthentication := o.AwsServicesAuthentication + if vAwsServicesAuthentication == nil { + // note: explicitly not the empty object. + vAwsServicesAuthentication = &ClusterControlPlaneAwsServicesAuthentication{} + } + if err := extractClusterControlPlaneAwsServicesAuthenticationFields(r, vAwsServicesAuthentication); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAwsServicesAuthentication) { + o.AwsServicesAuthentication = vAwsServicesAuthentication + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. 
+ vProxyConfig = &ClusterControlPlaneProxyConfig{} + } + if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. + vInstancePlacement = &ClusterControlPlaneInstancePlacement{} + } + if err := extractClusterControlPlaneInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + return nil +} +func extractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error { + return nil +} +func extractClusterControlPlaneConfigEncryptionFields(r *Cluster, o *ClusterControlPlaneConfigEncryption) error { + return nil +} +func extractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error { + return nil +} +func extractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error { + return nil +} +func extractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error { + return nil +} +func extractClusterControlPlaneAwsServicesAuthenticationFields(r *Cluster, o *ClusterControlPlaneAwsServicesAuthentication) error { + return nil +} +func extractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func extractClusterControlPlaneInstancePlacementFields(r *Cluster, o *ClusterControlPlaneInstancePlacement) error { + return nil +} +{{- end }} +func extractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error { + return nil +} +func extractClusterAuthorizationAdminUsersFields(r *Cluster, o *ClusterAuthorizationAdminUsers) error { + 
return nil +} +func extractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error { + return nil +} +func extractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error { + return nil +} +func extractClusterFleetFields(r *Cluster, o *ClusterFleet) error { +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func extractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error { + vComponentConfig := o.ComponentConfig + if vComponentConfig == nil { + // note: explicitly not the empty object. + vComponentConfig = &ClusterLoggingConfigComponentConfig{} + } + if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComponentConfig) { + o.ComponentConfig = vComponentConfig + } + return nil +} +func extractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error { + return nil +} +func extractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error { + vManagedPrometheusConfig := o.ManagedPrometheusConfig + if vManagedPrometheusConfig == nil { + // note: explicitly not the empty object. 
+ vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{} + } + if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) { + o.ManagedPrometheusConfig = vManagedPrometheusConfig + } + return nil +} +func extractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error { +{{- end }} + return nil +} +func extractClusterBinaryAuthorizationFields(r *Cluster, o *ClusterBinaryAuthorization) error { + return nil +} + +func postReadExtractClusterFields(r *Cluster) error { + vNetworking := r.Networking + if vNetworking == nil { + // note: explicitly not the empty object. + vNetworking = &ClusterNetworking{} + } + if err := postReadExtractClusterNetworkingFields(r, vNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworking) { + r.Networking = vNetworking + } + vControlPlane := r.ControlPlane + if vControlPlane == nil { + // note: explicitly not the empty object. + vControlPlane = &ClusterControlPlane{} + } + if err := postReadExtractClusterControlPlaneFields(r, vControlPlane); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vControlPlane) { + r.ControlPlane = vControlPlane + } + vAuthorization := r.Authorization + if vAuthorization == nil { + // note: explicitly not the empty object. + vAuthorization = &ClusterAuthorization{} + } + if err := postReadExtractClusterAuthorizationFields(r, vAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorization) { + r.Authorization = vAuthorization + } + vWorkloadIdentityConfig := r.WorkloadIdentityConfig + if vWorkloadIdentityConfig == nil { + // note: explicitly not the empty object. 
+ vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{} + } + if err := postReadExtractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) { + r.WorkloadIdentityConfig = vWorkloadIdentityConfig + } + vFleet := r.Fleet + if vFleet == nil { + // note: explicitly not the empty object. + vFleet = &ClusterFleet{} + } + if err := postReadExtractClusterFleetFields(r, vFleet); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleet) { + r.Fleet = vFleet + } +{{- if ne $.TargetVersionName "ga" }} + vLoggingConfig := r.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &ClusterLoggingConfig{} + } + if err := postReadExtractClusterLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + r.LoggingConfig = vLoggingConfig + } + vMonitoringConfig := r.MonitoringConfig + if vMonitoringConfig == nil { + // note: explicitly not the empty object. + vMonitoringConfig = &ClusterMonitoringConfig{} + } + if err := postReadExtractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoringConfig) { + r.MonitoringConfig = vMonitoringConfig + } +{{- end }} + vBinaryAuthorization := r.BinaryAuthorization + if vBinaryAuthorization == nil { + // note: explicitly not the empty object. 
+ vBinaryAuthorization = &ClusterBinaryAuthorization{} + } + if err := postReadExtractClusterBinaryAuthorizationFields(r, vBinaryAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vBinaryAuthorization) { + r.BinaryAuthorization = vBinaryAuthorization + } + return nil +} +func postReadExtractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error { + return nil +} +func postReadExtractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &ClusterControlPlaneSshConfig{} + } + if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. + vConfigEncryption = &ClusterControlPlaneConfigEncryption{} + } + if err := extractClusterControlPlaneConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &ClusterControlPlaneRootVolume{} + } + if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vMainVolume := o.MainVolume + if vMainVolume == nil { + // note: explicitly not the empty object. 
+ vMainVolume = &ClusterControlPlaneMainVolume{} + } + if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMainVolume) { + o.MainVolume = vMainVolume + } + vDatabaseEncryption := o.DatabaseEncryption + if vDatabaseEncryption == nil { + // note: explicitly not the empty object. + vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{} + } + if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) { + o.DatabaseEncryption = vDatabaseEncryption + } + vAwsServicesAuthentication := o.AwsServicesAuthentication + if vAwsServicesAuthentication == nil { + // note: explicitly not the empty object. + vAwsServicesAuthentication = &ClusterControlPlaneAwsServicesAuthentication{} + } + if err := extractClusterControlPlaneAwsServicesAuthenticationFields(r, vAwsServicesAuthentication); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAwsServicesAuthentication) { + o.AwsServicesAuthentication = vAwsServicesAuthentication + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. + vProxyConfig = &ClusterControlPlaneProxyConfig{} + } + if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. 
+ vInstancePlacement = &ClusterControlPlaneInstancePlacement{} + } + if err := extractClusterControlPlaneInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + return nil +} +func postReadExtractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error { + return nil +} +func postReadExtractClusterControlPlaneConfigEncryptionFields(r *Cluster, o *ClusterControlPlaneConfigEncryption) error { + return nil +} +func postReadExtractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error { + return nil +} +func postReadExtractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error { + return nil +} +func postReadExtractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error { + return nil +} +func postReadExtractClusterControlPlaneAwsServicesAuthenticationFields(r *Cluster, o *ClusterControlPlaneAwsServicesAuthentication) error { + return nil +} +func postReadExtractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractClusterControlPlaneInstancePlacementFields(r *Cluster, o *ClusterControlPlaneInstancePlacement) error { + return nil +} +{{- end }} +func postReadExtractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error { + return nil +} +func postReadExtractClusterAuthorizationAdminUsersFields(r *Cluster, o *ClusterAuthorizationAdminUsers) error { + return nil +} +func postReadExtractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error { + return nil +} +func postReadExtractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error { + return nil +} +func postReadExtractClusterFleetFields(r *Cluster, o 
*ClusterFleet) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error { + vComponentConfig := o.ComponentConfig + if vComponentConfig == nil { + // note: explicitly not the empty object. + vComponentConfig = &ClusterLoggingConfigComponentConfig{} + } + if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComponentConfig) { + o.ComponentConfig = vComponentConfig + } + return nil +} +func postReadExtractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error { + return nil +} +func postReadExtractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error { + vManagedPrometheusConfig := o.ManagedPrometheusConfig + if vManagedPrometheusConfig == nil { + // note: explicitly not the empty object. + vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{} + } + if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) { + o.ManagedPrometheusConfig = vManagedPrometheusConfig + } + return nil +} +func postReadExtractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error { + return nil +} +{{- end }} +func postReadExtractClusterBinaryAuthorizationFields(r *Cluster, o *ClusterBinaryAuthorization) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl b/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl new file mode 100644 index 000000000000..45c432b3df10 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl @@ -0,0 +1,1309 @@ +package containeraws + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + 
"time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type NodePool struct { + Name *string `json:"name"` + Version *string `json:"version"` + Config *NodePoolConfig `json:"config"` + Autoscaling *NodePoolAutoscaling `json:"autoscaling"` + SubnetId *string `json:"subnetId"` + State *NodePoolStateEnum `json:"state"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + MaxPodsConstraint *NodePoolMaxPodsConstraint `json:"maxPodsConstraint"` + Management *NodePoolManagement `json:"management"` + KubeletConfig *NodePoolKubeletConfig `json:"kubeletConfig"` + UpdateSettings *NodePoolUpdateSettings `json:"updateSettings"` + Project *string `json:"project"` + Location *string `json:"location"` + Cluster *string `json:"cluster"` +} + +func (r *NodePool) String() string { + return dcl.SprintResource(r) +} + +// The enum NodePoolConfigRootVolumeVolumeTypeEnum. +type NodePoolConfigRootVolumeVolumeTypeEnum string + +// NodePoolConfigRootVolumeVolumeTypeEnumRef returns a *NodePoolConfigRootVolumeVolumeTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolConfigRootVolumeVolumeTypeEnumRef(s string) *NodePoolConfigRootVolumeVolumeTypeEnum { + v := NodePoolConfigRootVolumeVolumeTypeEnum(s) + return &v +} + +func (v NodePoolConfigRootVolumeVolumeTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"VOLUME_TYPE_UNSPECIFIED", "GP2", "GP3"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolConfigRootVolumeVolumeTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum NodePoolConfigTaintsEffectEnum. 
+type NodePoolConfigTaintsEffectEnum string + +// NodePoolConfigTaintsEffectEnumRef returns a *NodePoolConfigTaintsEffectEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolConfigTaintsEffectEnumRef(s string) *NodePoolConfigTaintsEffectEnum { + v := NodePoolConfigTaintsEffectEnum(s) + return &v +} + +func (v NodePoolConfigTaintsEffectEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"EFFECT_UNSPECIFIED", "NO_SCHEDULE", "PREFER_NO_SCHEDULE", "NO_EXECUTE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolConfigTaintsEffectEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- if ne $.TargetVersionName "ga" }} +// The enum NodePoolConfigInstancePlacementTenancyEnum. +type NodePoolConfigInstancePlacementTenancyEnum string + +// NodePoolConfigInstancePlacementTenancyEnumRef returns a *NodePoolConfigInstancePlacementTenancyEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolConfigInstancePlacementTenancyEnumRef(s string) *NodePoolConfigInstancePlacementTenancyEnum { + v := NodePoolConfigInstancePlacementTenancyEnum(s) + return &v +} + +func (v NodePoolConfigInstancePlacementTenancyEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TENANCY_UNSPECIFIED", "DEFAULT", "DEDICATED", "HOST"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolConfigInstancePlacementTenancyEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- end }} +// The enum NodePoolStateEnum. +type NodePoolStateEnum string + +// NodePoolStateEnumRef returns a *NodePoolStateEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func NodePoolStateEnumRef(s string) *NodePoolStateEnum { + v := NodePoolStateEnum(s) + return &v +} + +func (v NodePoolStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum NodePoolKubeletConfigCpuManagerPolicyEnum. +type NodePoolKubeletConfigCpuManagerPolicyEnum string + +// NodePoolKubeletConfigCpuManagerPolicyEnumRef returns a *NodePoolKubeletConfigCpuManagerPolicyEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolKubeletConfigCpuManagerPolicyEnumRef(s string) *NodePoolKubeletConfigCpuManagerPolicyEnum { + v := NodePoolKubeletConfigCpuManagerPolicyEnum(s) + return &v +} + +func (v NodePoolKubeletConfigCpuManagerPolicyEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"none", "static"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolKubeletConfigCpuManagerPolicyEnum", + Value: string(v), + Valid: []string{}, + } +} + +type NodePoolConfig struct { + empty bool `json:"-"` + InstanceType *string `json:"instanceType"` + RootVolume *NodePoolConfigRootVolume `json:"rootVolume"` + Taints []NodePoolConfigTaints `json:"taints"` + Labels map[string]string `json:"labels"` + Tags map[string]string `json:"tags"` + IamInstanceProfile *string `json:"iamInstanceProfile"` + ConfigEncryption *NodePoolConfigConfigEncryption `json:"configEncryption"` + SshConfig *NodePoolConfigSshConfig `json:"sshConfig"` +{{- if ne $.TargetVersionName "ga" }} + SpotConfig *NodePoolConfigSpotConfig `json:"spotConfig"` +{{- end }} + SecurityGroupIds []string `json:"securityGroupIds"` + ProxyConfig *NodePoolConfigProxyConfig `json:"proxyConfig"` +{{- if ne $.TargetVersionName "ga" }} + InstancePlacement *NodePoolConfigInstancePlacement `json:"instancePlacement"` + ImageType *string `json:"imageType"` +{{- end }} + AutoscalingMetricsCollection *NodePoolConfigAutoscalingMetricsCollection `json:"autoscalingMetricsCollection"` +} + +type jsonNodePoolConfig NodePoolConfig + +func (r *NodePoolConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfig + } else { + + r.InstanceType = res.InstanceType + + r.RootVolume = res.RootVolume + + r.Taints = res.Taints + + r.Labels = res.Labels + + r.Tags = res.Tags + + r.IamInstanceProfile = res.IamInstanceProfile + + r.ConfigEncryption = res.ConfigEncryption + + r.SshConfig = res.SshConfig + +{{- if ne $.TargetVersionName "ga" }} + r.SpotConfig = res.SpotConfig + +{{- end }} + r.SecurityGroupIds = res.SecurityGroupIds + + r.ProxyConfig = res.ProxyConfig 
+{{- if ne $.TargetVersionName "ga" }} + + r.InstancePlacement = res.InstancePlacement + + r.ImageType = res.ImageType +{{- end }} + + r.AutoscalingMetricsCollection = res.AutoscalingMetricsCollection + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyNodePoolConfig *NodePoolConfig = &NodePoolConfig{empty: true} + +func (r *NodePoolConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` + VolumeType *NodePoolConfigRootVolumeVolumeTypeEnum `json:"volumeType"` + Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonNodePoolConfigRootVolume NodePoolConfigRootVolume + +func (r *NodePoolConfigRootVolume) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigRootVolume + } else { + + r.SizeGib = res.SizeGib + + r.VolumeType = res.VolumeType + + r.Iops = res.Iops + + r.Throughput = res.Throughput + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigRootVolume *NodePoolConfigRootVolume = &NodePoolConfigRootVolume{empty: true} + +func (r *NodePoolConfigRootVolume) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigTaints struct { + empty bool `json:"-"` + Key *string `json:"key"` + Value *string `json:"value"` + Effect *NodePoolConfigTaintsEffectEnum `json:"effect"` +} + +type jsonNodePoolConfigTaints NodePoolConfigTaints + +func (r *NodePoolConfigTaints) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigTaints + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigTaints + } else { + + r.Key = res.Key + + r.Value = res.Value + + r.Effect = res.Effect + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigTaints is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigTaints *NodePoolConfigTaints = &NodePoolConfigTaints{empty: true} + +func (r *NodePoolConfigTaints) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigTaints) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigTaints) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigConfigEncryption struct { + empty bool `json:"-"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonNodePoolConfigConfigEncryption NodePoolConfigConfigEncryption + +func (r *NodePoolConfigConfigEncryption) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigConfigEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigConfigEncryption + } else { + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigConfigEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigConfigEncryption *NodePoolConfigConfigEncryption = &NodePoolConfigConfigEncryption{empty: true} + +func (r *NodePoolConfigConfigEncryption) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigConfigEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigConfigEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigSshConfig struct { + empty bool `json:"-"` + Ec2KeyPair *string `json:"ec2KeyPair"` +} + +type jsonNodePoolConfigSshConfig NodePoolConfigSshConfig + +func (r *NodePoolConfigSshConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigSshConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigSshConfig + } else { + + r.Ec2KeyPair = res.Ec2KeyPair + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigSshConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigSshConfig *NodePoolConfigSshConfig = &NodePoolConfigSshConfig{empty: true} + +func (r *NodePoolConfigSshConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- if ne $.TargetVersionName "ga" }} +type NodePoolConfigSpotConfig struct { + empty bool `json:"-"` + InstanceTypes []string `json:"instanceTypes"` +} + +type jsonNodePoolConfigSpotConfig NodePoolConfigSpotConfig + +func (r *NodePoolConfigSpotConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigSpotConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigSpotConfig + } else { + + r.InstanceTypes = res.InstanceTypes + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigSpotConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigSpotConfig *NodePoolConfigSpotConfig = &NodePoolConfigSpotConfig{empty: true} + +func (r *NodePoolConfigSpotConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigSpotConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigSpotConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- end }} +type NodePoolConfigProxyConfig struct { + empty bool `json:"-"` + SecretArn *string `json:"secretArn"` + SecretVersion *string `json:"secretVersion"` +} + +type jsonNodePoolConfigProxyConfig NodePoolConfigProxyConfig + +func (r *NodePoolConfigProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigProxyConfig + } else { + + r.SecretArn = res.SecretArn + + r.SecretVersion = res.SecretVersion + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigProxyConfig *NodePoolConfigProxyConfig = &NodePoolConfigProxyConfig{empty: true} + +func (r *NodePoolConfigProxyConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigProxyConfig) HashCode() string { +{{- if ne $.TargetVersionName "ga" }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigInstancePlacement struct { + empty bool `json:"-"` + Tenancy *NodePoolConfigInstancePlacementTenancyEnum `json:"tenancy"` +} + +type jsonNodePoolConfigInstancePlacement NodePoolConfigInstancePlacement + +func (r *NodePoolConfigInstancePlacement) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigInstancePlacement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigInstancePlacement + } else { + + r.Tenancy = res.Tenancy + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigInstancePlacement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigInstancePlacement *NodePoolConfigInstancePlacement = &NodePoolConfigInstancePlacement{empty: true} + +func (r *NodePoolConfigInstancePlacement) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigInstancePlacement) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigInstancePlacement) HashCode() string { +{{- end }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigAutoscalingMetricsCollection struct { + empty bool `json:"-"` + Granularity *string `json:"granularity"` + Metrics []string `json:"metrics"` +} + +type jsonNodePoolConfigAutoscalingMetricsCollection NodePoolConfigAutoscalingMetricsCollection + +func (r *NodePoolConfigAutoscalingMetricsCollection) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigAutoscalingMetricsCollection + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigAutoscalingMetricsCollection + } else { + + r.Granularity = res.Granularity + + r.Metrics = res.Metrics + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigAutoscalingMetricsCollection is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigAutoscalingMetricsCollection *NodePoolConfigAutoscalingMetricsCollection = &NodePoolConfigAutoscalingMetricsCollection{empty: true} + +func (r *NodePoolConfigAutoscalingMetricsCollection) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigAutoscalingMetricsCollection) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigAutoscalingMetricsCollection) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolAutoscaling struct { + empty bool `json:"-"` + MinNodeCount *int64 `json:"minNodeCount"` + MaxNodeCount *int64 `json:"maxNodeCount"` +} + +type jsonNodePoolAutoscaling NodePoolAutoscaling + +func (r *NodePoolAutoscaling) UnmarshalJSON(data []byte) error { + var res jsonNodePoolAutoscaling + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolAutoscaling + } else { + + r.MinNodeCount = res.MinNodeCount + + r.MaxNodeCount = res.MaxNodeCount + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolAutoscaling is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolAutoscaling *NodePoolAutoscaling = &NodePoolAutoscaling{empty: true} + +func (r *NodePoolAutoscaling) Empty() bool { + return r.empty +} + +func (r *NodePoolAutoscaling) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolAutoscaling) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolMaxPodsConstraint struct { + empty bool `json:"-"` + MaxPodsPerNode *int64 `json:"maxPodsPerNode"` +} + +type jsonNodePoolMaxPodsConstraint NodePoolMaxPodsConstraint + +func (r *NodePoolMaxPodsConstraint) UnmarshalJSON(data []byte) error { + var res jsonNodePoolMaxPodsConstraint + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolMaxPodsConstraint + } else { + + r.MaxPodsPerNode = res.MaxPodsPerNode + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolMaxPodsConstraint is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolMaxPodsConstraint *NodePoolMaxPodsConstraint = &NodePoolMaxPodsConstraint{empty: true} + +func (r *NodePoolMaxPodsConstraint) Empty() bool { + return r.empty +} + +func (r *NodePoolMaxPodsConstraint) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolMaxPodsConstraint) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolManagement struct { + empty bool `json:"-"` + AutoRepair *bool `json:"autoRepair"` +} + +type jsonNodePoolManagement NodePoolManagement + +func (r *NodePoolManagement) UnmarshalJSON(data []byte) error { + var res jsonNodePoolManagement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolManagement + } else { + + r.AutoRepair = res.AutoRepair + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolManagement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolManagement *NodePoolManagement = &NodePoolManagement{empty: true} + +func (r *NodePoolManagement) Empty() bool { + return r.empty +} + +func (r *NodePoolManagement) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolManagement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolKubeletConfig struct { + empty bool `json:"-"` + CpuManagerPolicy *NodePoolKubeletConfigCpuManagerPolicyEnum `json:"cpuManagerPolicy"` + CpuCfsQuota *bool `json:"cpuCfsQuota"` + CpuCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod"` + PodPidsLimit *int64 `json:"podPidsLimit"` +} + +type jsonNodePoolKubeletConfig NodePoolKubeletConfig + +func (r *NodePoolKubeletConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolKubeletConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolKubeletConfig + } else { + + r.CpuManagerPolicy = res.CpuManagerPolicy + + r.CpuCfsQuota = res.CpuCfsQuota + + r.CpuCfsQuotaPeriod = res.CpuCfsQuotaPeriod + + r.PodPidsLimit = res.PodPidsLimit + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolKubeletConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolKubeletConfig *NodePoolKubeletConfig = &NodePoolKubeletConfig{empty: true} + +func (r *NodePoolKubeletConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolKubeletConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolKubeletConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolUpdateSettings struct { + empty bool `json:"-"` + SurgeSettings *NodePoolUpdateSettingsSurgeSettings `json:"surgeSettings"` +} + +type jsonNodePoolUpdateSettings NodePoolUpdateSettings + +func (r *NodePoolUpdateSettings) UnmarshalJSON(data []byte) error { + var res jsonNodePoolUpdateSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolUpdateSettings + } else { + + r.SurgeSettings = res.SurgeSettings + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolUpdateSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolUpdateSettings *NodePoolUpdateSettings = &NodePoolUpdateSettings{empty: true} + +func (r *NodePoolUpdateSettings) Empty() bool { + return r.empty +} + +func (r *NodePoolUpdateSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolUpdateSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolUpdateSettingsSurgeSettings struct { + empty bool `json:"-"` + MaxSurge *int64 `json:"maxSurge"` + MaxUnavailable *int64 `json:"maxUnavailable"` +} + +type jsonNodePoolUpdateSettingsSurgeSettings NodePoolUpdateSettingsSurgeSettings + +func (r *NodePoolUpdateSettingsSurgeSettings) UnmarshalJSON(data []byte) error { + var res jsonNodePoolUpdateSettingsSurgeSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolUpdateSettingsSurgeSettings + } else { + + r.MaxSurge = res.MaxSurge + + r.MaxUnavailable = res.MaxUnavailable + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolUpdateSettingsSurgeSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolUpdateSettingsSurgeSettings *NodePoolUpdateSettingsSurgeSettings = &NodePoolUpdateSettingsSurgeSettings{empty: true} + +func (r *NodePoolUpdateSettingsSurgeSettings) Empty() bool { + return r.empty +} + +func (r *NodePoolUpdateSettingsSurgeSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolUpdateSettingsSurgeSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *NodePool) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_aws", + Type: "NodePool", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containeraws", +{{- end }} + } +} + +func (r *NodePool) ID() (string, error) { + if err := extractNodePoolFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "version": dcl.ValueOrEmptyString(nr.Version), + "config": dcl.ValueOrEmptyString(nr.Config), + "autoscaling": dcl.ValueOrEmptyString(nr.Autoscaling), + "subnet_id": dcl.ValueOrEmptyString(nr.SubnetId), + "state": dcl.ValueOrEmptyString(nr.State), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "max_pods_constraint": dcl.ValueOrEmptyString(nr.MaxPodsConstraint), + "management": dcl.ValueOrEmptyString(nr.Management), + "kubelet_config": dcl.ValueOrEmptyString(nr.KubeletConfig), + "update_settings": dcl.ValueOrEmptyString(nr.UpdateSettings), + 
"project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const NodePoolMaxPage = -1 + +type NodePoolList struct { + Items []*NodePool + + nextToken string + + pageSize int32 + + resource *NodePool +} + +func (l *NodePoolList) HasNext() bool { + return l.nextToken != "" +} + +func (l *NodePoolList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listNodePool(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListNodePool(ctx context.Context, project, location, cluster string) (*NodePoolList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListNodePoolWithMaxResults(ctx, project, location, cluster, NodePoolMaxPage) + +} + +func (c *Client) ListNodePoolWithMaxResults(ctx context.Context, project, location, cluster string, pageSize int32) (*NodePoolList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &NodePool{ + Project: &project, + Location: &location, + Cluster: &cluster, + } + items, token, err := c.listNodePool(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &NodePoolList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetNodePool(ctx context.Context, r *NodePool) (*NodePool, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractNodePoolFields(r) + + b, err := c.getNodePoolRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalNodePool(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Cluster = r.Cluster + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeNodePoolNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractNodePoolFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteNodePool(ctx context.Context, r *NodePool) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("NodePool resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting NodePool...") + deleteOp := 
deleteNodePoolOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllNodePool deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllNodePool(ctx context.Context, project, location, cluster string, filter func(*NodePool) bool) error { + listObj, err := c.ListNodePool(ctx, project, location, cluster) + if err != nil { + return err + } + + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyNodePool(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *NodePool + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyNodePoolHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyNodePoolHelper(c *Client, ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyNodePool...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractNodePoolFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.nodePoolDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToNodePoolDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []nodePoolApiOperation + if create { + ops = append(ops, &createNodePoolOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyNodePoolDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyNodePoolDiff(c *Client, ctx context.Context, desired *NodePool, rawDesired *NodePool, ops []nodePoolApiOperation, opts ...dcl.ApplyOption) (*NodePool, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetNodePool(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createNodePoolOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapNodePool(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeNodePoolNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeNodePoolNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeNodePoolDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractNodePoolFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractNodePoolFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffNodePool(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/node_pool_internal.go.tmpl b/mmv1/third_party/terraform/services/containeraws/node_pool_internal.go.tmpl new file mode 100644 index 000000000000..24747a2267ca --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/node_pool_internal.go.tmpl @@ -0,0 +1,6289 @@ +package containeraws + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *NodePool) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "version"); err != nil { + return err + } + if err := dcl.Required(r, "config"); err != nil { + return err + } + if err := dcl.Required(r, "autoscaling"); err != nil { + return err + } + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "maxPodsConstraint"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != 
nil { + return err + } + if err := dcl.RequiredParameter(r.Cluster, "Cluster"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Autoscaling) { + if err := r.Autoscaling.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MaxPodsConstraint) { + if err := r.MaxPodsConstraint.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Management) { + if err := r.Management.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.KubeletConfig) { + if err := r.KubeletConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.UpdateSettings) { + if err := r.UpdateSettings.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfig) validate() error { + if err := dcl.Required(r, "iamInstanceProfile"); err != nil { + return err + } + if err := dcl.Required(r, "configEncryption"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ConfigEncryption) { + if err := r.ConfigEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.SpotConfig) { + if err := r.SpotConfig.validate(); err != nil { + return err + } + } +{{- end }} + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.InstancePlacement) { + if err := r.InstancePlacement.validate(); err != nil { + return err + } + } +{{- end }} + if !dcl.IsEmptyValueIndirect(r.AutoscalingMetricsCollection) { + if err := 
r.AutoscalingMetricsCollection.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfigRootVolume) validate() error { + return nil +} +func (r *NodePoolConfigTaints) validate() error { + if err := dcl.Required(r, "key"); err != nil { + return err + } + if err := dcl.Required(r, "value"); err != nil { + return err + } + if err := dcl.Required(r, "effect"); err != nil { + return err + } + return nil +} +func (r *NodePoolConfigConfigEncryption) validate() error { + if err := dcl.Required(r, "kmsKeyArn"); err != nil { + return err + } + return nil +} +func (r *NodePoolConfigSshConfig) validate() error { + if err := dcl.Required(r, "ec2KeyPair"); err != nil { + return err + } + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func (r *NodePoolConfigSpotConfig) validate() error { + if err := dcl.Required(r, "instanceTypes"); err != nil { + return err + } + return nil +} +{{- end }} +func (r *NodePoolConfigProxyConfig) validate() error { + if err := dcl.Required(r, "secretArn"); err != nil { + return err + } + if err := dcl.Required(r, "secretVersion"); err != nil { + return err + } +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func (r *NodePoolConfigInstancePlacement) validate() error { +{{- end }} + return nil +} +func (r *NodePoolConfigAutoscalingMetricsCollection) validate() error { + if err := dcl.Required(r, "granularity"); err != nil { + return err + } + return nil +} +func (r *NodePoolAutoscaling) validate() error { + if err := dcl.Required(r, "minNodeCount"); err != nil { + return err + } + if err := dcl.Required(r, "maxNodeCount"); err != nil { + return err + } + return nil +} +func (r *NodePoolMaxPodsConstraint) validate() error { + if err := dcl.Required(r, "maxPodsPerNode"); err != nil { + return err + } + return nil +} +func (r *NodePoolManagement) validate() error { + return nil +} +func (r *NodePoolKubeletConfig) validate() error { + return nil +} +func (r *NodePoolUpdateSettings) validate() error { + if 
!dcl.IsEmptyValueIndirect(r.SurgeSettings) { + if err := r.SurgeSettings.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolUpdateSettingsSurgeSettings) validate() error { + return nil +} +func (r *NodePool) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *NodePool) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *NodePool) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools", nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools?awsNodePoolId={{ "{{" }}name{{ "}}" }}", 
nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// nodePoolApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type nodePoolApiOperation interface { + do(context.Context, *NodePool, *Client) error +} + +// newUpdateNodePoolUpdateAwsNodePoolRequest creates a request for an +// NodePool resource's UpdateAwsNodePool update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateNodePoolUpdateAwsNodePoolRequest(ctx context.Context, f *NodePool, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + req["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["config"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["autoscaling"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v, err := expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["management"] = v + } + if v, err := expandNodePoolUpdateSettings(c, f.UpdateSettings, res); err != nil { + return nil, fmt.Errorf("error expanding UpdateSettings into updateSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["updateSettings"] = v + } + b, err := c.getNodePoolRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateNodePoolUpdateAwsNodePoolRequest converts the update into +// the final JSON request body. 
+func marshalUpdateNodePoolUpdateAwsNodePoolRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateNodePoolUpdateAwsNodePoolOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateNodePoolUpdateAwsNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + _, err := c.GetNodePool(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAwsNodePool") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateNodePoolUpdateAwsNodePoolRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateNodePoolUpdateAwsNodePoolRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listNodePoolRaw(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if 
err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != NodePoolMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listNodePoolOperation struct { + AwsNodePools []map[string]interface{} `json:"awsNodePools"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listNodePool(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]*NodePool, string, error) { + b, err := c.listNodePoolRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listNodePoolOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*NodePool + for _, v := range m.AwsNodePools { + res, err := unmarshalMapNodePool(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.Cluster = r.Cluster + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllNodePool(ctx context.Context, f func(*NodePool) bool, resources []*NodePool) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteNodePool(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteNodePoolOperation struct{} + +func (op *deleteNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + r, err := c.GetNodePool(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "NodePool not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetNodePool checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetNodePool(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createNodePoolOperation struct { + response map[string]interface{} +} + +func (op *createNodePoolOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetNodePool(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getNodePoolRaw(ctx context.Context, r *NodePool) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) nodePoolDiffsForRawDesired(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) 
(initial, desired *NodePool, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *NodePool + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*NodePool); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected NodePool, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetNodePool(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a NodePool resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve NodePool resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that NodePool resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for NodePool: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for NodePool: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractNodePoolFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeNodePoolInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for NodePool: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for NodePool: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffNodePool(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeNodePoolInitialState(rawInitial, rawDesired *NodePool) (*NodePool, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeNodePoolDesiredState(rawDesired, rawInitial *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, nil, opts...) + rawDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, nil, opts...) + rawDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, nil, opts...) + rawDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, nil, opts...) + rawDesired.KubeletConfig = canonicalizeNodePoolKubeletConfig(rawDesired.KubeletConfig, nil, opts...) + rawDesired.UpdateSettings = canonicalizeNodePoolUpdateSettings(rawDesired.UpdateSettings, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &NodePool{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Version, rawInitial.Version) { + canonicalDesired.Version = rawInitial.Version + } else { + canonicalDesired.Version = rawDesired.Version + } + canonicalDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, rawInitial.Config, opts...) + canonicalDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, rawInitial.Autoscaling, opts...) + if dcl.StringCanonicalize(rawDesired.SubnetId, rawInitial.SubnetId) { + canonicalDesired.SubnetId = rawInitial.SubnetId + } else { + canonicalDesired.SubnetId = rawDesired.SubnetId + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + canonicalDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, rawInitial.MaxPodsConstraint, opts...) + canonicalDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, rawInitial.Management, opts...) + canonicalDesired.KubeletConfig = canonicalizeNodePoolKubeletConfig(rawDesired.KubeletConfig, rawInitial.KubeletConfig, opts...) + canonicalDesired.UpdateSettings = canonicalizeNodePoolUpdateSettings(rawDesired.UpdateSettings, rawInitial.UpdateSettings, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Cluster, rawInitial.Cluster) { + canonicalDesired.Cluster = rawInitial.Cluster + } else { + canonicalDesired.Cluster = rawDesired.Cluster + } + return canonicalDesired, nil +} + +func canonicalizeNodePoolNewState(c *Client, rawNew, rawDesired *NodePool) (*NodePool, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Version) && dcl.IsEmptyValueIndirect(rawDesired.Version) { + rawNew.Version = rawDesired.Version + } else { + if dcl.StringCanonicalize(rawDesired.Version, rawNew.Version) { + rawNew.Version = rawDesired.Version + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Config) && dcl.IsEmptyValueIndirect(rawDesired.Config) { + rawNew.Config = rawDesired.Config + } else { + rawNew.Config = canonicalizeNewNodePoolConfig(c, rawDesired.Config, rawNew.Config) + } + + if dcl.IsEmptyValueIndirect(rawNew.Autoscaling) && dcl.IsEmptyValueIndirect(rawDesired.Autoscaling) { + rawNew.Autoscaling = rawDesired.Autoscaling + } else { + rawNew.Autoscaling = canonicalizeNewNodePoolAutoscaling(c, rawDesired.Autoscaling, rawNew.Autoscaling) + } + + if dcl.IsEmptyValueIndirect(rawNew.SubnetId) && dcl.IsEmptyValueIndirect(rawDesired.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } else { + if dcl.StringCanonicalize(rawDesired.SubnetId, rawNew.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && 
dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.MaxPodsConstraint) && dcl.IsEmptyValueIndirect(rawDesired.MaxPodsConstraint) { + rawNew.MaxPodsConstraint = rawDesired.MaxPodsConstraint + } else { + rawNew.MaxPodsConstraint = canonicalizeNewNodePoolMaxPodsConstraint(c, rawDesired.MaxPodsConstraint, rawNew.MaxPodsConstraint) + } + + if dcl.IsEmptyValueIndirect(rawNew.Management) && dcl.IsEmptyValueIndirect(rawDesired.Management) { + rawNew.Management = rawDesired.Management + } else { + rawNew.Management = canonicalizeNewNodePoolManagement(c, rawDesired.Management, rawNew.Management) + } + + if dcl.IsEmptyValueIndirect(rawNew.KubeletConfig) && 
dcl.IsEmptyValueIndirect(rawDesired.KubeletConfig) { + rawNew.KubeletConfig = rawDesired.KubeletConfig + } else { + rawNew.KubeletConfig = canonicalizeNewNodePoolKubeletConfig(c, rawDesired.KubeletConfig, rawNew.KubeletConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateSettings) && dcl.IsEmptyValueIndirect(rawDesired.UpdateSettings) { + rawNew.UpdateSettings = rawDesired.UpdateSettings + } else { + rawNew.UpdateSettings = canonicalizeNewNodePoolUpdateSettings(c, rawDesired.UpdateSettings, rawNew.UpdateSettings) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Cluster = rawDesired.Cluster + + return rawNew, nil +} + +func canonicalizeNodePoolConfig(des, initial *NodePoolConfig, opts ...dcl.ApplyOption) *NodePoolConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfig{} + + if dcl.StringCanonicalize(des.InstanceType, initial.InstanceType) || dcl.IsZeroValue(des.InstanceType) { + cDes.InstanceType = initial.InstanceType + } else { + cDes.InstanceType = des.InstanceType + } + cDes.RootVolume = canonicalizeNodePoolConfigRootVolume(des.RootVolume, initial.RootVolume, opts...) + cDes.Taints = canonicalizeNodePoolConfigTaintsSlice(des.Taints, initial.Taints, opts...) + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + if dcl.StringCanonicalize(des.IamInstanceProfile, initial.IamInstanceProfile) || dcl.IsZeroValue(des.IamInstanceProfile) { + cDes.IamInstanceProfile = initial.IamInstanceProfile + } else { + cDes.IamInstanceProfile = des.IamInstanceProfile + } + cDes.ConfigEncryption = canonicalizeNodePoolConfigConfigEncryption(des.ConfigEncryption, initial.ConfigEncryption, opts...) + cDes.SshConfig = canonicalizeNodePoolConfigSshConfig(des.SshConfig, initial.SshConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.SpotConfig = canonicalizeNodePoolConfigSpotConfig(des.SpotConfig, initial.SpotConfig, opts...) +{{- end }} + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, initial.SecurityGroupIds) { + cDes.SecurityGroupIds = initial.SecurityGroupIds + } else { + cDes.SecurityGroupIds = des.SecurityGroupIds + } + cDes.ProxyConfig = canonicalizeNodePoolConfigProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.InstancePlacement = canonicalizeNodePoolConfigInstancePlacement(des.InstancePlacement, initial.InstancePlacement, opts...) + if dcl.StringCanonicalize(des.ImageType, initial.ImageType) || dcl.IsZeroValue(des.ImageType) { + cDes.ImageType = initial.ImageType + } else { + cDes.ImageType = des.ImageType + } +{{- end }} + cDes.AutoscalingMetricsCollection = canonicalizeNodePoolConfigAutoscalingMetricsCollection(des.AutoscalingMetricsCollection, initial.AutoscalingMetricsCollection, opts...) + + return cDes +} + +func canonicalizeNodePoolConfigSlice(des, initial []NodePoolConfig, opts ...dcl.ApplyOption) []NodePoolConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfig(c *Client, des, nw *NodePoolConfig) *NodePoolConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceType, nw.InstanceType) { + nw.InstanceType = des.InstanceType + } + nw.RootVolume = canonicalizeNewNodePoolConfigRootVolume(c, des.RootVolume, nw.RootVolume) + nw.Taints = canonicalizeNewNodePoolConfigTaintsSlice(c, des.Taints, nw.Taints) + if dcl.StringCanonicalize(des.IamInstanceProfile, nw.IamInstanceProfile) { + nw.IamInstanceProfile = des.IamInstanceProfile + } + nw.ConfigEncryption = canonicalizeNewNodePoolConfigConfigEncryption(c, des.ConfigEncryption, nw.ConfigEncryption) + nw.SshConfig = canonicalizeNewNodePoolConfigSshConfig(c, des.SshConfig, nw.SshConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.SpotConfig = canonicalizeNewNodePoolConfigSpotConfig(c, des.SpotConfig, nw.SpotConfig) +{{- end }} + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, nw.SecurityGroupIds) { + nw.SecurityGroupIds = des.SecurityGroupIds + } + nw.ProxyConfig = canonicalizeNewNodePoolConfigProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.InstancePlacement = canonicalizeNewNodePoolConfigInstancePlacement(c, des.InstancePlacement, nw.InstancePlacement) + if dcl.StringCanonicalize(des.ImageType, nw.ImageType) { + nw.ImageType = des.ImageType + } +{{- end }} + nw.AutoscalingMetricsCollection = canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c, 
des.AutoscalingMetricsCollection, nw.AutoscalingMetricsCollection) + + return nw +} + +func canonicalizeNewNodePoolConfigSet(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSlice(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigRootVolume(des, initial *NodePoolConfigRootVolume, opts ...dcl.ApplyOption) *NodePoolConfigRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + if dcl.IsZeroValue(des.VolumeType) || (dcl.IsEmptyValueIndirect(des.VolumeType) && dcl.IsEmptyValueIndirect(initial.VolumeType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.VolumeType = initial.VolumeType + } else { + cDes.VolumeType = des.VolumeType + } + if dcl.IsZeroValue(des.Iops) || (dcl.IsEmptyValueIndirect(des.Iops) && dcl.IsEmptyValueIndirect(initial.Iops)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Iops = initial.Iops + } else { + cDes.Iops = des.Iops + } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeNodePoolConfigRootVolumeSlice(des, initial []NodePoolConfigRootVolume, opts ...dcl.ApplyOption) []NodePoolConfigRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigRootVolume(c *Client, des, nw *NodePoolConfigRootVolume) *NodePoolConfigRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigRootVolume while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewNodePoolConfigRootVolumeSet(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigRootVolumeSlice(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigTaints(des, initial *NodePoolConfigTaints, opts ...dcl.ApplyOption) *NodePoolConfigTaints { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigTaints{} + + if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { + cDes.Key = initial.Key + } else { + cDes.Key = des.Key + } + if dcl.StringCanonicalize(des.Value, initial.Value) || dcl.IsZeroValue(des.Value) { + cDes.Value = initial.Value + } else { + cDes.Value = des.Value + } + if dcl.IsZeroValue(des.Effect) || (dcl.IsEmptyValueIndirect(des.Effect) && dcl.IsEmptyValueIndirect(initial.Effect)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Effect = initial.Effect + } else { + cDes.Effect = des.Effect + } + + return cDes +} + +func canonicalizeNodePoolConfigTaintsSlice(des, initial []NodePoolConfigTaints, opts ...dcl.ApplyOption) []NodePoolConfigTaints { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigTaints, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigTaints(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigTaints, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigTaints(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigTaints(c *Client, des, nw *NodePoolConfigTaints) *NodePoolConfigTaints { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigTaints while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Key, nw.Key) { + nw.Key = des.Key + } + if dcl.StringCanonicalize(des.Value, nw.Value) { + nw.Value = des.Value + } + + return nw +} + +func canonicalizeNewNodePoolConfigTaintsSet(c *Client, des, nw []NodePoolConfigTaints) []NodePoolConfigTaints { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigTaints + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigTaintsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigTaints(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigTaintsSlice(c *Client, des, nw []NodePoolConfigTaints) []NodePoolConfigTaints { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigTaints + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigTaints(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigConfigEncryption(des, initial *NodePoolConfigConfigEncryption, opts ...dcl.ApplyOption) *NodePoolConfigConfigEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigConfigEncryption{} + + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeNodePoolConfigConfigEncryptionSlice(des, initial []NodePoolConfigConfigEncryption, opts ...dcl.ApplyOption) []NodePoolConfigConfigEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigConfigEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigConfigEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigConfigEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigConfigEncryption(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigConfigEncryption(c *Client, des, nw *NodePoolConfigConfigEncryption) *NodePoolConfigConfigEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigConfigEncryption while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewNodePoolConfigConfigEncryptionSet(c *Client, des, nw []NodePoolConfigConfigEncryption) []NodePoolConfigConfigEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigConfigEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigConfigEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigConfigEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigConfigEncryptionSlice(c *Client, des, nw []NodePoolConfigConfigEncryption) []NodePoolConfigConfigEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigConfigEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigConfigEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigSshConfig(des, initial *NodePoolConfigSshConfig, opts ...dcl.ApplyOption) *NodePoolConfigSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigSshConfig{} + + if dcl.StringCanonicalize(des.Ec2KeyPair, initial.Ec2KeyPair) || dcl.IsZeroValue(des.Ec2KeyPair) { + cDes.Ec2KeyPair = initial.Ec2KeyPair + } else { + cDes.Ec2KeyPair = des.Ec2KeyPair + } + + return cDes +} + +func canonicalizeNodePoolConfigSshConfigSlice(des, initial []NodePoolConfigSshConfig, opts ...dcl.ApplyOption) []NodePoolConfigSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigSshConfig(c *Client, des, nw *NodePoolConfigSshConfig) *NodePoolConfigSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigSshConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Ec2KeyPair, nw.Ec2KeyPair) { + nw.Ec2KeyPair = des.Ec2KeyPair + } + + return nw +} + +func canonicalizeNewNodePoolConfigSshConfigSet(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSshConfigSlice(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &n)) + } + + return items +} + +{{- if ne $.TargetVersionName "ga" }} +func canonicalizeNodePoolConfigSpotConfig(des, initial *NodePoolConfigSpotConfig, opts ...dcl.ApplyOption) *NodePoolConfigSpotConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigSpotConfig{} + + if dcl.StringArrayCanonicalize(des.InstanceTypes, initial.InstanceTypes) { + cDes.InstanceTypes = initial.InstanceTypes + } else { + cDes.InstanceTypes = des.InstanceTypes + } + + return cDes +} + +func canonicalizeNodePoolConfigSpotConfigSlice(des, initial []NodePoolConfigSpotConfig, opts ...dcl.ApplyOption) []NodePoolConfigSpotConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigSpotConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigSpotConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigSpotConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigSpotConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigSpotConfig(c *Client, des, nw *NodePoolConfigSpotConfig) *NodePoolConfigSpotConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigSpotConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceTypes, nw.InstanceTypes) { + nw.InstanceTypes = des.InstanceTypes + } + + return nw +} + +func canonicalizeNewNodePoolConfigSpotConfigSet(c *Client, des, nw []NodePoolConfigSpotConfig) []NodePoolConfigSpotConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigSpotConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigSpotConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigSpotConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSpotConfigSlice(c *Client, des, nw []NodePoolConfigSpotConfig) []NodePoolConfigSpotConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigSpotConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigSpotConfig(c, &d, &n)) + } + + return items +} + +{{- end }} +func canonicalizeNodePoolConfigProxyConfig(des, initial *NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) *NodePoolConfigProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigProxyConfig{} + + if dcl.StringCanonicalize(des.SecretArn, initial.SecretArn) || dcl.IsZeroValue(des.SecretArn) { + cDes.SecretArn = initial.SecretArn + } else { + cDes.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, initial.SecretVersion) || dcl.IsZeroValue(des.SecretVersion) { + cDes.SecretVersion = initial.SecretVersion + } else { + cDes.SecretVersion = des.SecretVersion + } + + return cDes +} + +func canonicalizeNodePoolConfigProxyConfigSlice(des, initial []NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) []NodePoolConfigProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigProxyConfig(c *Client, des, nw *NodePoolConfigProxyConfig) *NodePoolConfigProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigProxyConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SecretArn, nw.SecretArn) { + nw.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, nw.SecretVersion) { + nw.SecretVersion = des.SecretVersion + } + + return nw +} + +func canonicalizeNewNodePoolConfigProxyConfigSet(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigProxyConfigSlice(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &n)) +{{- if ne $.TargetVersionName "ga" }} + } + + return items +} + +func canonicalizeNodePoolConfigInstancePlacement(des, initial *NodePoolConfigInstancePlacement, opts ...dcl.ApplyOption) *NodePoolConfigInstancePlacement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigInstancePlacement{} + + if dcl.IsZeroValue(des.Tenancy) || (dcl.IsEmptyValueIndirect(des.Tenancy) && dcl.IsEmptyValueIndirect(initial.Tenancy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tenancy = initial.Tenancy + } else { + cDes.Tenancy = des.Tenancy + } + + return cDes +} + +func canonicalizeNodePoolConfigInstancePlacementSlice(des, initial []NodePoolConfigInstancePlacement, opts ...dcl.ApplyOption) []NodePoolConfigInstancePlacement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigInstancePlacement, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigInstancePlacement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigInstancePlacement, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigInstancePlacement(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigInstancePlacement(c *Client, des, nw *NodePoolConfigInstancePlacement) *NodePoolConfigInstancePlacement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigInstancePlacement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolConfigInstancePlacementSet(c *Client, des, nw []NodePoolConfigInstancePlacement) []NodePoolConfigInstancePlacement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigInstancePlacement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigInstancePlacementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigInstancePlacement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigInstancePlacementSlice(c *Client, des, nw []NodePoolConfigInstancePlacement) []NodePoolConfigInstancePlacement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigInstancePlacement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigInstancePlacement(c, &d, &n)) +{{- end }} + } + + return items +} + +func canonicalizeNodePoolConfigAutoscalingMetricsCollection(des, initial *NodePoolConfigAutoscalingMetricsCollection, opts ...dcl.ApplyOption) *NodePoolConfigAutoscalingMetricsCollection { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigAutoscalingMetricsCollection{} + + if dcl.StringCanonicalize(des.Granularity, initial.Granularity) || dcl.IsZeroValue(des.Granularity) { + cDes.Granularity = initial.Granularity + } else { + cDes.Granularity = des.Granularity + } + if dcl.StringArrayCanonicalize(des.Metrics, initial.Metrics) { + cDes.Metrics = initial.Metrics + } else { + cDes.Metrics = des.Metrics + } + + return cDes +} + +func canonicalizeNodePoolConfigAutoscalingMetricsCollectionSlice(des, initial []NodePoolConfigAutoscalingMetricsCollection, opts ...dcl.ApplyOption) []NodePoolConfigAutoscalingMetricsCollection { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigAutoscalingMetricsCollection, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigAutoscalingMetricsCollection(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigAutoscalingMetricsCollection, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigAutoscalingMetricsCollection(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c *Client, des, nw *NodePoolConfigAutoscalingMetricsCollection) *NodePoolConfigAutoscalingMetricsCollection { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigAutoscalingMetricsCollection while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Granularity, nw.Granularity) { + nw.Granularity = des.Granularity + } + if dcl.StringArrayCanonicalize(des.Metrics, nw.Metrics) { + nw.Metrics = des.Metrics + } + + return nw +} + +func canonicalizeNewNodePoolConfigAutoscalingMetricsCollectionSet(c *Client, des, nw []NodePoolConfigAutoscalingMetricsCollection) []NodePoolConfigAutoscalingMetricsCollection { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigAutoscalingMetricsCollection + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigAutoscalingMetricsCollectionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigAutoscalingMetricsCollectionSlice(c *Client, des, nw []NodePoolConfigAutoscalingMetricsCollection) []NodePoolConfigAutoscalingMetricsCollection { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
+ // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigAutoscalingMetricsCollection + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolAutoscaling(des, initial *NodePoolAutoscaling, opts ...dcl.ApplyOption) *NodePoolAutoscaling { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolAutoscaling{} + + if dcl.IsZeroValue(des.MinNodeCount) || (dcl.IsEmptyValueIndirect(des.MinNodeCount) && dcl.IsEmptyValueIndirect(initial.MinNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MinNodeCount = initial.MinNodeCount + } else { + cDes.MinNodeCount = des.MinNodeCount + } + if dcl.IsZeroValue(des.MaxNodeCount) || (dcl.IsEmptyValueIndirect(des.MaxNodeCount) && dcl.IsEmptyValueIndirect(initial.MaxNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxNodeCount = initial.MaxNodeCount + } else { + cDes.MaxNodeCount = des.MaxNodeCount + } + + return cDes +} + +func canonicalizeNodePoolAutoscalingSlice(des, initial []NodePoolAutoscaling, opts ...dcl.ApplyOption) []NodePoolAutoscaling { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolAutoscaling, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolAutoscaling, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolAutoscaling(c *Client, des, nw *NodePoolAutoscaling) *NodePoolAutoscaling { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolAutoscaling while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolAutoscalingSet(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolAutoscaling + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolAutoscalingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolAutoscalingSlice(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolAutoscaling + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolMaxPodsConstraint(des, initial *NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) *NodePoolMaxPodsConstraint { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolMaxPodsConstraint{} + + if dcl.IsZeroValue(des.MaxPodsPerNode) || (dcl.IsEmptyValueIndirect(des.MaxPodsPerNode) && dcl.IsEmptyValueIndirect(initial.MaxPodsPerNode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxPodsPerNode = initial.MaxPodsPerNode + } else { + cDes.MaxPodsPerNode = des.MaxPodsPerNode + } + + return cDes +} + +func canonicalizeNodePoolMaxPodsConstraintSlice(des, initial []NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) []NodePoolMaxPodsConstraint { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolMaxPodsConstraint(c *Client, des, nw *NodePoolMaxPodsConstraint) *NodePoolMaxPodsConstraint { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolMaxPodsConstraint while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolMaxPodsConstraintSet(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolMaxPodsConstraint + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolMaxPodsConstraintNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolMaxPodsConstraintSlice(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolMaxPodsConstraint + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolManagement(des, initial *NodePoolManagement, opts ...dcl.ApplyOption) *NodePoolManagement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolManagement{} + + if dcl.BoolCanonicalize(des.AutoRepair, initial.AutoRepair) || dcl.IsZeroValue(des.AutoRepair) { + cDes.AutoRepair = initial.AutoRepair + } else { + cDes.AutoRepair = des.AutoRepair + } + + return cDes +} + +func canonicalizeNodePoolManagementSlice(des, initial []NodePoolManagement, opts ...dcl.ApplyOption) []NodePoolManagement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolManagement, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolManagement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolManagement, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolManagement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolManagement(c *Client, des, nw *NodePoolManagement) *NodePoolManagement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolManagement while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AutoRepair, nw.AutoRepair) { + nw.AutoRepair = des.AutoRepair + } + + return nw +} + +func canonicalizeNewNodePoolManagementSet(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolManagement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolManagementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolManagementSlice(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolManagement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolKubeletConfig(des, initial *NodePoolKubeletConfig, opts ...dcl.ApplyOption) *NodePoolKubeletConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolKubeletConfig{} + + if dcl.IsZeroValue(des.CpuManagerPolicy) || (dcl.IsEmptyValueIndirect(des.CpuManagerPolicy) && dcl.IsEmptyValueIndirect(initial.CpuManagerPolicy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.CpuManagerPolicy = initial.CpuManagerPolicy + } else { + cDes.CpuManagerPolicy = des.CpuManagerPolicy + } + if dcl.BoolCanonicalize(des.CpuCfsQuota, initial.CpuCfsQuota) || dcl.IsZeroValue(des.CpuCfsQuota) { + cDes.CpuCfsQuota = initial.CpuCfsQuota + } else { + cDes.CpuCfsQuota = des.CpuCfsQuota + } + if dcl.StringCanonicalize(des.CpuCfsQuotaPeriod, initial.CpuCfsQuotaPeriod) || dcl.IsZeroValue(des.CpuCfsQuotaPeriod) { + cDes.CpuCfsQuotaPeriod = initial.CpuCfsQuotaPeriod + } else { + cDes.CpuCfsQuotaPeriod = des.CpuCfsQuotaPeriod + } + if dcl.IsZeroValue(des.PodPidsLimit) || (dcl.IsEmptyValueIndirect(des.PodPidsLimit) && dcl.IsEmptyValueIndirect(initial.PodPidsLimit)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.PodPidsLimit = initial.PodPidsLimit + } else { + cDes.PodPidsLimit = des.PodPidsLimit + } + + return cDes +} + +func canonicalizeNodePoolKubeletConfigSlice(des, initial []NodePoolKubeletConfig, opts ...dcl.ApplyOption) []NodePoolKubeletConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolKubeletConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolKubeletConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolKubeletConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolKubeletConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolKubeletConfig(c *Client, des, nw *NodePoolKubeletConfig) *NodePoolKubeletConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolKubeletConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.CpuCfsQuota, nw.CpuCfsQuota) { + nw.CpuCfsQuota = des.CpuCfsQuota + } + if dcl.StringCanonicalize(des.CpuCfsQuotaPeriod, nw.CpuCfsQuotaPeriod) { + nw.CpuCfsQuotaPeriod = des.CpuCfsQuotaPeriod + } + + return nw +} + +func canonicalizeNewNodePoolKubeletConfigSet(c *Client, des, nw []NodePoolKubeletConfig) []NodePoolKubeletConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolKubeletConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolKubeletConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolKubeletConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolKubeletConfigSlice(c *Client, des, nw []NodePoolKubeletConfig) []NodePoolKubeletConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolKubeletConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolKubeletConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolUpdateSettings(des, initial *NodePoolUpdateSettings, opts ...dcl.ApplyOption) *NodePoolUpdateSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolUpdateSettings{} + + cDes.SurgeSettings = canonicalizeNodePoolUpdateSettingsSurgeSettings(des.SurgeSettings, initial.SurgeSettings, opts...) 
+ + return cDes +} + +func canonicalizeNodePoolUpdateSettingsSlice(des, initial []NodePoolUpdateSettings, opts ...dcl.ApplyOption) []NodePoolUpdateSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolUpdateSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolUpdateSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolUpdateSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolUpdateSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolUpdateSettings(c *Client, des, nw *NodePoolUpdateSettings) *NodePoolUpdateSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolUpdateSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.SurgeSettings = canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c, des.SurgeSettings, nw.SurgeSettings) + + return nw +} + +func canonicalizeNewNodePoolUpdateSettingsSet(c *Client, des, nw []NodePoolUpdateSettings) []NodePoolUpdateSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolUpdateSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolUpdateSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolUpdateSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. 
+ items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolUpdateSettingsSlice(c *Client, des, nw []NodePoolUpdateSettings) []NodePoolUpdateSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolUpdateSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolUpdateSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolUpdateSettingsSurgeSettings(des, initial *NodePoolUpdateSettingsSurgeSettings, opts ...dcl.ApplyOption) *NodePoolUpdateSettingsSurgeSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolUpdateSettingsSurgeSettings{} + + if dcl.IsZeroValue(des.MaxSurge) || (dcl.IsEmptyValueIndirect(des.MaxSurge) && dcl.IsEmptyValueIndirect(initial.MaxSurge)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxSurge = initial.MaxSurge + } else { + cDes.MaxSurge = des.MaxSurge + } + if dcl.IsZeroValue(des.MaxUnavailable) || (dcl.IsEmptyValueIndirect(des.MaxUnavailable) && dcl.IsEmptyValueIndirect(initial.MaxUnavailable)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxUnavailable = initial.MaxUnavailable + } else { + cDes.MaxUnavailable = des.MaxUnavailable + } + + return cDes +} + +func canonicalizeNodePoolUpdateSettingsSurgeSettingsSlice(des, initial []NodePoolUpdateSettingsSurgeSettings, opts ...dcl.ApplyOption) []NodePoolUpdateSettingsSurgeSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolUpdateSettingsSurgeSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolUpdateSettingsSurgeSettings(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolUpdateSettingsSurgeSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolUpdateSettingsSurgeSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c *Client, des, nw *NodePoolUpdateSettingsSurgeSettings) *NodePoolUpdateSettingsSurgeSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolUpdateSettingsSurgeSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolUpdateSettingsSurgeSettingsSet(c *Client, des, nw []NodePoolUpdateSettingsSurgeSettings) []NodePoolUpdateSettingsSurgeSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolUpdateSettingsSurgeSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolUpdateSettingsSurgeSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolUpdateSettingsSurgeSettingsSlice(c *Client, des, nw []NodePoolUpdateSettingsSurgeSettings) []NodePoolUpdateSettingsSurgeSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolUpdateSettingsSurgeSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffNodePool(c *Client, desired, actual *NodePool, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigNewStyle, EmptyObject: EmptyNodePoolConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Autoscaling, actual.Autoscaling, dcl.DiffInfo{ObjectFunction: compareNodePoolAutoscalingNewStyle, EmptyObject: EmptyNodePoolAutoscaling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Autoscaling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxPodsConstraint, actual.MaxPodsConstraint, dcl.DiffInfo{ObjectFunction: compareNodePoolMaxPodsConstraintNewStyle, EmptyObject: EmptyNodePoolMaxPodsConstraint, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsConstraint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{ObjectFunction: compareNodePoolManagementNewStyle, EmptyObject: EmptyNodePoolManagement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.KubeletConfig, actual.KubeletConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolKubeletConfigNewStyle, EmptyObject: EmptyNodePoolKubeletConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubeletConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateSettings, actual.UpdateSettings, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolUpdateSettingsNewStyle, EmptyObject: EmptyNodePoolUpdateSettings, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("UpdateSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareNodePoolConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig or *NodePoolConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceType, actual.InstanceType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("InstanceType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolConfigRootVolumeNewStyle, EmptyObject: EmptyNodePoolConfigRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Taints, actual.Taints, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigTaintsNewStyle, EmptyObject: EmptyNodePoolConfigTaints, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Taints")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IamInstanceProfile, actual.IamInstanceProfile, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("IamInstanceProfile")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ConfigEncryption, actual.ConfigEncryption, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigConfigEncryptionNewStyle, EmptyObject: EmptyNodePoolConfigConfigEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigEncryption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigSshConfigNewStyle, EmptyObject: EmptyNodePoolConfigSshConfig, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + +{{- if ne $.TargetVersionName "ga" }} + if ds, err := dcl.Diff(desired.SpotConfig, actual.SpotConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigSpotConfigNewStyle, EmptyObject: EmptyNodePoolConfigSpotConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SpotConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + +{{- end }} + if ds, err := dcl.Diff(desired.SecurityGroupIds, actual.SecurityGroupIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SecurityGroupIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigProxyConfigNewStyle, EmptyObject: EmptyNodePoolConfigProxyConfig, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstancePlacement, actual.InstancePlacement, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolConfigInstancePlacementNewStyle, EmptyObject: EmptyNodePoolConfigInstancePlacement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstancePlacement")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ImageType, actual.ImageType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageType")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AutoscalingMetricsCollection, actual.AutoscalingMetricsCollection, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigAutoscalingMetricsCollectionNewStyle, EmptyObject: EmptyNodePoolConfigAutoscalingMetricsCollection, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("AutoscalingMetricsCollection")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigRootVolume) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume or *NodePoolConfigRootVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigRootVolume) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.VolumeType, actual.VolumeType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("VolumeType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Iops, actual.Iops, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Iops")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigTaintsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigTaints) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigTaints) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigTaints or *NodePoolConfigTaints", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigTaints) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigTaints) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigTaints", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Value")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Effect, actual.Effect, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Effect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigConfigEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigConfigEncryption) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigConfigEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigConfigEncryption or *NodePoolConfigConfigEncryption", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigConfigEncryption) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigConfigEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigConfigEncryption", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigSshConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig or *NodePoolConfigSshConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigSshConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Ec2KeyPair, actual.Ec2KeyPair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Ec2KeyPair")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +{{- if ne $.TargetVersionName "ga" }} +func compareNodePoolConfigSpotConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigSpotConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigSpotConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSpotConfig or *NodePoolConfigSpotConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigSpotConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigSpotConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSpotConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTypes, actual.InstanceTypes, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTypes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +{{- end }} +func compareNodePoolConfigProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigProxyConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig or *NodePoolConfigProxyConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigProxyConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SecretArn, actual.SecretArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SecretArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecretVersion, actual.SecretVersion, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SecretVersion")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigInstancePlacementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigInstancePlacement) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigInstancePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigInstancePlacement or *NodePoolConfigInstancePlacement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigInstancePlacement) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigInstancePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigInstancePlacement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Tenancy, actual.Tenancy, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tenancy")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigAutoscalingMetricsCollectionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigAutoscalingMetricsCollection) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigAutoscalingMetricsCollection) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigAutoscalingMetricsCollection or *NodePoolConfigAutoscalingMetricsCollection", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigAutoscalingMetricsCollection) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigAutoscalingMetricsCollection) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigAutoscalingMetricsCollection", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Granularity, actual.Granularity, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Granularity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Metrics, actual.Metrics, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Metrics")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolAutoscalingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolAutoscaling) + if !ok { + desiredNotPointer, ok := d.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling or *NodePoolAutoscaling", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolAutoscaling) + if !ok { + actualNotPointer, ok := a.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MinNodeCount, actual.MinNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MinNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxNodeCount, actual.MaxNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MaxNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolMaxPodsConstraintNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolMaxPodsConstraint) + if !ok { + desiredNotPointer, ok := d.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint or *NodePoolMaxPodsConstraint", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolMaxPodsConstraint) + if !ok { + actualNotPointer, ok := a.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MaxPodsPerNode, actual.MaxPodsPerNode, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsPerNode")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolManagementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolManagement) + if !ok { + desiredNotPointer, ok := d.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement or *NodePoolManagement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolManagement) + if !ok { + actualNotPointer, ok := a.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AutoRepair, actual.AutoRepair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("AutoRepair")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolKubeletConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolKubeletConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolKubeletConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolKubeletConfig or *NodePoolKubeletConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolKubeletConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolKubeletConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolKubeletConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.CpuManagerPolicy, actual.CpuManagerPolicy, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuManagerPolicy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuCfsQuota, actual.CpuCfsQuota, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuCfsQuota")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuCfsQuotaPeriod, actual.CpuCfsQuotaPeriod, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuCfsQuotaPeriod")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodPidsLimit, actual.PodPidsLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodPidsLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolUpdateSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolUpdateSettings) + if !ok { + desiredNotPointer, ok := d.(NodePoolUpdateSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettings or *NodePoolUpdateSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolUpdateSettings) + if !ok { + actualNotPointer, ok := a.(NodePoolUpdateSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SurgeSettings, actual.SurgeSettings, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolUpdateSettingsSurgeSettingsNewStyle, EmptyObject: EmptyNodePoolUpdateSettingsSurgeSettings, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SurgeSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolUpdateSettingsSurgeSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolUpdateSettingsSurgeSettings) + if !ok { + desiredNotPointer, ok := d.(NodePoolUpdateSettingsSurgeSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettingsSurgeSettings or *NodePoolUpdateSettingsSurgeSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolUpdateSettingsSurgeSettings) + if !ok { + actualNotPointer, ok := a.(NodePoolUpdateSettingsSurgeSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettingsSurgeSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MaxSurge, actual.MaxSurge, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MaxSurge")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxUnavailable, actual.MaxUnavailable, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MaxUnavailable")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *NodePool) urlNormalized() *NodePool { + normalized := dcl.Copy(*r).(NodePool) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Version = dcl.SelfLinkToName(r.Version) + normalized.SubnetId = dcl.SelfLinkToName(r.SubnetId) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Cluster = dcl.SelfLinkToName(r.Cluster) + return &normalized +} + +func (r *NodePool) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAwsNodePool" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the NodePool resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *NodePool) marshal(c *Client) ([]byte, error) { + m, err := expandNodePool(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling NodePool: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalNodePool decodes JSON responses into the NodePool resource schema. 
+func unmarshalNodePool(b []byte, c *Client, res *NodePool) (*NodePool, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapNodePool(m, c, res) +} + +func unmarshalMapNodePool(m map[string]interface{}, c *Client, res *NodePool) (*NodePool, error) { + + flattened := flattenNodePool(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandNodePool expands NodePool into a JSON request object. +func expandNodePool(c *Client, f *NodePool) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/awsClusters/%s/awsNodePools/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Cluster), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Version; dcl.ValueShouldBeSent(v) { + m["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["config"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscaling"] = v + } + if v := f.SubnetId; dcl.ValueShouldBeSent(v) { + m["subnetId"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := expandNodePoolMaxPodsConstraint(c, f.MaxPodsConstraint, res); err != nil { + return nil, fmt.Errorf("error expanding MaxPodsConstraint into maxPodsConstraint: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["maxPodsConstraint"] = v + } + if v, err := 
expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + if v, err := expandNodePoolKubeletConfig(c, f.KubeletConfig, res); err != nil { + return nil, fmt.Errorf("error expanding KubeletConfig into kubeletConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kubeletConfig"] = v + } + if v, err := expandNodePoolUpdateSettings(c, f.UpdateSettings, res); err != nil { + return nil, fmt.Errorf("error expanding UpdateSettings into updateSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["updateSettings"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Cluster into cluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + + return m, nil +} + +// flattenNodePool flattens NodePool from a JSON request object into the +// NodePool type. 
+func flattenNodePool(c *Client, i interface{}, res *NodePool) *NodePool { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &NodePool{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Version = dcl.FlattenString(m["version"]) + resultRes.Config = flattenNodePoolConfig(c, m["config"], res) + resultRes.Autoscaling = flattenNodePoolAutoscaling(c, m["autoscaling"], res) + resultRes.SubnetId = dcl.FlattenString(m["subnetId"]) + resultRes.State = flattenNodePoolStateEnum(m["state"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.MaxPodsConstraint = flattenNodePoolMaxPodsConstraint(c, m["maxPodsConstraint"], res) + resultRes.Management = flattenNodePoolManagement(c, m["management"], res) + resultRes.KubeletConfig = flattenNodePoolKubeletConfig(c, m["kubeletConfig"], res) + resultRes.UpdateSettings = flattenNodePoolUpdateSettings(c, m["updateSettings"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Cluster = dcl.FlattenString(m["cluster"]) + + return resultRes +} + +// expandNodePoolConfigMap expands the contents of NodePoolConfig into a JSON +// request object. 
+func expandNodePoolConfigMap(c *Client, f map[string]NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSlice expands the contents of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfigSlice(c *Client, f []NodePoolConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigMap flattens the contents of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfig{} + } + + items := make(map[string]NodePoolConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSlice flattens the contents of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfig{} + } + + if len(a) == 0 { + return []NodePoolConfig{} + } + + items := make([]NodePoolConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfig expands an instance of NodePoolConfig into a JSON +// request object. 
+func expandNodePoolConfig(c *Client, f *NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.InstanceType; !dcl.IsEmptyValueIndirect(v) { + m["instanceType"] = v + } + if v, err := expandNodePoolConfigRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v, err := expandNodePoolConfigTaintsSlice(c, f.Taints, res); err != nil { + return nil, fmt.Errorf("error expanding Taints into taints: %w", err) + } else if v != nil { + m["taints"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) { + m["tags"] = v + } + if v := f.IamInstanceProfile; !dcl.IsEmptyValueIndirect(v) { + m["iamInstanceProfile"] = v + } + if v, err := expandNodePoolConfigConfigEncryption(c, f.ConfigEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding ConfigEncryption into configEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["configEncryption"] = v + } + if v, err := expandNodePoolConfigSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } +{{- if ne $.TargetVersionName "ga" }} + if v, err := expandNodePoolConfigSpotConfig(c, f.SpotConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SpotConfig into spotConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["spotConfig"] = v + } +{{- end }} + if v := f.SecurityGroupIds; v != nil { + m["securityGroupIds"] = v + } + if v, err := expandNodePoolConfigProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if 
!dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandNodePoolConfigInstancePlacement(c, f.InstancePlacement, res); err != nil { + return nil, fmt.Errorf("error expanding InstancePlacement into instancePlacement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["instancePlacement"] = v + } + if v := f.ImageType; !dcl.IsEmptyValueIndirect(v) { + m["imageType"] = v +{{- end }} + } + if v, err := expandNodePoolConfigAutoscalingMetricsCollection(c, f.AutoscalingMetricsCollection, res); err != nil { + return nil, fmt.Errorf("error expanding AutoscalingMetricsCollection into autoscalingMetricsCollection: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscalingMetricsCollection"] = v + } + + return m, nil +} + +// flattenNodePoolConfig flattens an instance of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfig + } + r.InstanceType = dcl.FlattenString(m["instanceType"]) + r.RootVolume = flattenNodePoolConfigRootVolume(c, m["rootVolume"], res) + r.Taints = flattenNodePoolConfigTaintsSlice(c, m["taints"], res) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.IamInstanceProfile = dcl.FlattenString(m["iamInstanceProfile"]) + r.ConfigEncryption = flattenNodePoolConfigConfigEncryption(c, m["configEncryption"], res) + r.SshConfig = flattenNodePoolConfigSshConfig(c, m["sshConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.SpotConfig = flattenNodePoolConfigSpotConfig(c, m["spotConfig"], res) +{{- end }} + r.SecurityGroupIds = dcl.FlattenStringSlice(m["securityGroupIds"]) + r.ProxyConfig = flattenNodePoolConfigProxyConfig(c, m["proxyConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + 
r.InstancePlacement = flattenNodePoolConfigInstancePlacement(c, m["instancePlacement"], res) + r.ImageType = dcl.FlattenString(m["imageType"]) +{{- end }} + r.AutoscalingMetricsCollection = flattenNodePoolConfigAutoscalingMetricsCollection(c, m["autoscalingMetricsCollection"], res) + + return r +} + +// expandNodePoolConfigRootVolumeMap expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeMap(c *Client, f map[string]NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigRootVolumeSlice expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeSlice(c *Client, f []NodePoolConfigRootVolume, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigRootVolumeMap flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolumeMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigRootVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigRootVolume{} + } + + items := make(map[string]NodePoolConfigRootVolume) + for k, item := range a { + items[k] = *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigRootVolumeSlice flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. +func flattenNodePoolConfigRootVolumeSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigRootVolume { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return []NodePoolConfigRootVolume{} + } + + items := make([]NodePoolConfigRootVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigRootVolume expands an instance of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolume(c *Client, f *NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + if v := f.VolumeType; !dcl.IsEmptyValueIndirect(v) { + m["volumeType"] = v + } + if v := f.Iops; !dcl.IsEmptyValueIndirect(v) { + m["iops"] = v + } + if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) { + m["throughput"] = v + } + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenNodePoolConfigRootVolume flattens an instance of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolume(c *Client, i interface{}, res *NodePool) *NodePoolConfigRootVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigRootVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigRootVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + r.VolumeType = flattenNodePoolConfigRootVolumeVolumeTypeEnum(m["volumeType"]) + r.Iops = dcl.FlattenInteger(m["iops"]) + r.Throughput = dcl.FlattenInteger(m["throughput"]) + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandNodePoolConfigTaintsMap expands the contents of NodePoolConfigTaints into a JSON +// request object. +func expandNodePoolConfigTaintsMap(c *Client, f map[string]NodePoolConfigTaints, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigTaints(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigTaintsSlice expands the contents of NodePoolConfigTaints into a JSON +// request object. +func expandNodePoolConfigTaintsSlice(c *Client, f []NodePoolConfigTaints, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigTaints(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigTaintsMap flattens the contents of NodePoolConfigTaints from a JSON +// response object. 
+func flattenNodePoolConfigTaintsMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigTaints { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigTaints{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigTaints{} + } + + items := make(map[string]NodePoolConfigTaints) + for k, item := range a { + items[k] = *flattenNodePoolConfigTaints(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigTaintsSlice flattens the contents of NodePoolConfigTaints from a JSON +// response object. +func flattenNodePoolConfigTaintsSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigTaints { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigTaints{} + } + + if len(a) == 0 { + return []NodePoolConfigTaints{} + } + + items := make([]NodePoolConfigTaints, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigTaints(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigTaints expands an instance of NodePoolConfigTaints into a JSON +// request object. +func expandNodePoolConfigTaints(c *Client, f *NodePoolConfigTaints, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Key; !dcl.IsEmptyValueIndirect(v) { + m["key"] = v + } + if v := f.Value; !dcl.IsEmptyValueIndirect(v) { + m["value"] = v + } + if v := f.Effect; !dcl.IsEmptyValueIndirect(v) { + m["effect"] = v + } + + return m, nil +} + +// flattenNodePoolConfigTaints flattens an instance of NodePoolConfigTaints from a JSON +// response object. 
+func flattenNodePoolConfigTaints(c *Client, i interface{}, res *NodePool) *NodePoolConfigTaints { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigTaints{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigTaints + } + r.Key = dcl.FlattenString(m["key"]) + r.Value = dcl.FlattenString(m["value"]) + r.Effect = flattenNodePoolConfigTaintsEffectEnum(m["effect"]) + + return r +} + +// expandNodePoolConfigConfigEncryptionMap expands the contents of NodePoolConfigConfigEncryption into a JSON +// request object. +func expandNodePoolConfigConfigEncryptionMap(c *Client, f map[string]NodePoolConfigConfigEncryption, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigConfigEncryption(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigConfigEncryptionSlice expands the contents of NodePoolConfigConfigEncryption into a JSON +// request object. +func expandNodePoolConfigConfigEncryptionSlice(c *Client, f []NodePoolConfigConfigEncryption, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigConfigEncryption(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigConfigEncryptionMap flattens the contents of NodePoolConfigConfigEncryption from a JSON +// response object. 
+func flattenNodePoolConfigConfigEncryptionMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigConfigEncryption { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigConfigEncryption{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigConfigEncryption{} + } + + items := make(map[string]NodePoolConfigConfigEncryption) + for k, item := range a { + items[k] = *flattenNodePoolConfigConfigEncryption(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigConfigEncryptionSlice flattens the contents of NodePoolConfigConfigEncryption from a JSON +// response object. +func flattenNodePoolConfigConfigEncryptionSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigConfigEncryption { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigConfigEncryption{} + } + + if len(a) == 0 { + return []NodePoolConfigConfigEncryption{} + } + + items := make([]NodePoolConfigConfigEncryption, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigConfigEncryption(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigConfigEncryption expands an instance of NodePoolConfigConfigEncryption into a JSON +// request object. +func expandNodePoolConfigConfigEncryption(c *Client, f *NodePoolConfigConfigEncryption, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenNodePoolConfigConfigEncryption flattens an instance of NodePoolConfigConfigEncryption from a JSON +// response object. 
+func flattenNodePoolConfigConfigEncryption(c *Client, i interface{}, res *NodePool) *NodePoolConfigConfigEncryption { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigConfigEncryption{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigConfigEncryption + } + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandNodePoolConfigSshConfigMap expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigMap(c *Client, f map[string]NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSshConfigSlice expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigSlice(c *Client, f []NodePoolConfigSshConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigSshConfigMap flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigSshConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigSshConfig{} + } + + items := make(map[string]NodePoolConfigSshConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSshConfigSlice flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. +func flattenNodePoolConfigSshConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigSshConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigSshConfig{} + } + + items := make([]NodePoolConfigSshConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigSshConfig expands an instance of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfig(c *Client, f *NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Ec2KeyPair; !dcl.IsEmptyValueIndirect(v) { + m["ec2KeyPair"] = v + } + + return m, nil +} + +// flattenNodePoolConfigSshConfig flattens an instance of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigSshConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigSshConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigSshConfig + } + r.Ec2KeyPair = dcl.FlattenString(m["ec2KeyPair"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandNodePoolConfigSpotConfigMap expands the contents of NodePoolConfigSpotConfig into a JSON +// request object. +func expandNodePoolConfigSpotConfigMap(c *Client, f map[string]NodePoolConfigSpotConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigSpotConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSpotConfigSlice expands the contents of NodePoolConfigSpotConfig into a JSON +// request object. +func expandNodePoolConfigSpotConfigSlice(c *Client, f []NodePoolConfigSpotConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigSpotConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigSpotConfigMap flattens the contents of NodePoolConfigSpotConfig from a JSON +// response object. 
+func flattenNodePoolConfigSpotConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigSpotConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigSpotConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigSpotConfig{} + } + + items := make(map[string]NodePoolConfigSpotConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigSpotConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSpotConfigSlice flattens the contents of NodePoolConfigSpotConfig from a JSON +// response object. +func flattenNodePoolConfigSpotConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigSpotConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigSpotConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigSpotConfig{} + } + + items := make([]NodePoolConfigSpotConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigSpotConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigSpotConfig expands an instance of NodePoolConfigSpotConfig into a JSON +// request object. +func expandNodePoolConfigSpotConfig(c *Client, f *NodePoolConfigSpotConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.InstanceTypes; v != nil { + m["instanceTypes"] = v + } + + return m, nil +} + +// flattenNodePoolConfigSpotConfig flattens an instance of NodePoolConfigSpotConfig from a JSON +// response object. 
+func flattenNodePoolConfigSpotConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigSpotConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigSpotConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigSpotConfig + } + r.InstanceTypes = dcl.FlattenStringSlice(m["instanceTypes"]) + + return r +} + +{{- end }} +// expandNodePoolConfigProxyConfigMap expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigMap(c *Client, f map[string]NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigProxyConfigSlice expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigSlice(c *Client, f []NodePoolConfigProxyConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigProxyConfigMap flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigProxyConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigProxyConfig{} + } + + items := make(map[string]NodePoolConfigProxyConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigProxyConfigSlice flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. +func flattenNodePoolConfigProxyConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigProxyConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigProxyConfig{} + } + + items := make([]NodePoolConfigProxyConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigProxyConfig expands an instance of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfig(c *Client, f *NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SecretArn; !dcl.IsEmptyValueIndirect(v) { + m["secretArn"] = v + } + if v := f.SecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["secretVersion"] = v + } + + return m, nil +} + +// flattenNodePoolConfigProxyConfig flattens an instance of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigProxyConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigProxyConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigProxyConfig + } + r.SecretArn = dcl.FlattenString(m["secretArn"]) + r.SecretVersion = dcl.FlattenString(m["secretVersion"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandNodePoolConfigInstancePlacementMap expands the contents of NodePoolConfigInstancePlacement into a JSON +// request object. +func expandNodePoolConfigInstancePlacementMap(c *Client, f map[string]NodePoolConfigInstancePlacement, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigInstancePlacement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigInstancePlacementSlice expands the contents of NodePoolConfigInstancePlacement into a JSON +// request object. +func expandNodePoolConfigInstancePlacementSlice(c *Client, f []NodePoolConfigInstancePlacement, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigInstancePlacement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigInstancePlacementMap flattens the contents of NodePoolConfigInstancePlacement from a JSON +// response object. 
+func flattenNodePoolConfigInstancePlacementMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigInstancePlacement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigInstancePlacement{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigInstancePlacement{} + } + + items := make(map[string]NodePoolConfigInstancePlacement) + for k, item := range a { + items[k] = *flattenNodePoolConfigInstancePlacement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigInstancePlacementSlice flattens the contents of NodePoolConfigInstancePlacement from a JSON +// response object. +func flattenNodePoolConfigInstancePlacementSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigInstancePlacement { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigInstancePlacement{} + } + + if len(a) == 0 { + return []NodePoolConfigInstancePlacement{} + } + + items := make([]NodePoolConfigInstancePlacement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigInstancePlacement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigInstancePlacement expands an instance of NodePoolConfigInstancePlacement into a JSON +// request object. +func expandNodePoolConfigInstancePlacement(c *Client, f *NodePoolConfigInstancePlacement, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Tenancy; !dcl.IsEmptyValueIndirect(v) { + m["tenancy"] = v + } + + return m, nil +} + +// flattenNodePoolConfigInstancePlacement flattens an instance of NodePoolConfigInstancePlacement from a JSON +// response object. 
+func flattenNodePoolConfigInstancePlacement(c *Client, i interface{}, res *NodePool) *NodePoolConfigInstancePlacement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigInstancePlacement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigInstancePlacement + } + r.Tenancy = flattenNodePoolConfigInstancePlacementTenancyEnum(m["tenancy"]) + + return r +} + +{{- end }} +// expandNodePoolConfigAutoscalingMetricsCollectionMap expands the contents of NodePoolConfigAutoscalingMetricsCollection into a JSON +// request object. +func expandNodePoolConfigAutoscalingMetricsCollectionMap(c *Client, f map[string]NodePoolConfigAutoscalingMetricsCollection, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigAutoscalingMetricsCollection(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigAutoscalingMetricsCollectionSlice expands the contents of NodePoolConfigAutoscalingMetricsCollection into a JSON +// request object. +func expandNodePoolConfigAutoscalingMetricsCollectionSlice(c *Client, f []NodePoolConfigAutoscalingMetricsCollection, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigAutoscalingMetricsCollection(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigAutoscalingMetricsCollectionMap flattens the contents of NodePoolConfigAutoscalingMetricsCollection from a JSON +// response object. 
+func flattenNodePoolConfigAutoscalingMetricsCollectionMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigAutoscalingMetricsCollection { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigAutoscalingMetricsCollection{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigAutoscalingMetricsCollection{} + } + + items := make(map[string]NodePoolConfigAutoscalingMetricsCollection) + for k, item := range a { + items[k] = *flattenNodePoolConfigAutoscalingMetricsCollection(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigAutoscalingMetricsCollectionSlice flattens the contents of NodePoolConfigAutoscalingMetricsCollection from a JSON +// response object. +func flattenNodePoolConfigAutoscalingMetricsCollectionSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigAutoscalingMetricsCollection { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigAutoscalingMetricsCollection{} + } + + if len(a) == 0 { + return []NodePoolConfigAutoscalingMetricsCollection{} + } + + items := make([]NodePoolConfigAutoscalingMetricsCollection, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigAutoscalingMetricsCollection(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigAutoscalingMetricsCollection expands an instance of NodePoolConfigAutoscalingMetricsCollection into a JSON +// request object. 
+func expandNodePoolConfigAutoscalingMetricsCollection(c *Client, f *NodePoolConfigAutoscalingMetricsCollection, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Granularity; !dcl.IsEmptyValueIndirect(v) { + m["granularity"] = v + } + if v := f.Metrics; v != nil { + m["metrics"] = v + } + + return m, nil +} + +// flattenNodePoolConfigAutoscalingMetricsCollection flattens an instance of NodePoolConfigAutoscalingMetricsCollection from a JSON +// response object. +func flattenNodePoolConfigAutoscalingMetricsCollection(c *Client, i interface{}, res *NodePool) *NodePoolConfigAutoscalingMetricsCollection { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigAutoscalingMetricsCollection{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigAutoscalingMetricsCollection + } + r.Granularity = dcl.FlattenString(m["granularity"]) + r.Metrics = dcl.FlattenStringSlice(m["metrics"]) + + return r +} + +// expandNodePoolAutoscalingMap expands the contents of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscalingMap(c *Client, f map[string]NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolAutoscalingSlice expands the contents of NodePoolAutoscaling into a JSON +// request object. 
+func expandNodePoolAutoscalingSlice(c *Client, f []NodePoolAutoscaling, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolAutoscalingMap flattens the contents of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscalingMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolAutoscaling { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolAutoscaling{} + } + + if len(a) == 0 { + return map[string]NodePoolAutoscaling{} + } + + items := make(map[string]NodePoolAutoscaling) + for k, item := range a { + items[k] = *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolAutoscalingSlice flattens the contents of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscalingSlice(c *Client, i interface{}, res *NodePool) []NodePoolAutoscaling { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolAutoscaling{} + } + + if len(a) == 0 { + return []NodePoolAutoscaling{} + } + + items := make([]NodePoolAutoscaling, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolAutoscaling expands an instance of NodePoolAutoscaling into a JSON +// request object. 
+func expandNodePoolAutoscaling(c *Client, f *NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MinNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["minNodeCount"] = v + } + if v := f.MaxNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["maxNodeCount"] = v + } + + return m, nil +} + +// flattenNodePoolAutoscaling flattens an instance of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscaling(c *Client, i interface{}, res *NodePool) *NodePoolAutoscaling { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolAutoscaling{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolAutoscaling + } + r.MinNodeCount = dcl.FlattenInteger(m["minNodeCount"]) + r.MaxNodeCount = dcl.FlattenInteger(m["maxNodeCount"]) + + return r +} + +// expandNodePoolMaxPodsConstraintMap expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraintMap(c *Client, f map[string]NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolMaxPodsConstraintSlice expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. 
+func expandNodePoolMaxPodsConstraintSlice(c *Client, f []NodePoolMaxPodsConstraint, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolMaxPodsConstraintMap flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraintMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolMaxPodsConstraint { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return map[string]NodePoolMaxPodsConstraint{} + } + + items := make(map[string]NodePoolMaxPodsConstraint) + for k, item := range a { + items[k] = *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolMaxPodsConstraintSlice flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraintSlice(c *Client, i interface{}, res *NodePool) []NodePoolMaxPodsConstraint { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return []NodePoolMaxPodsConstraint{} + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolMaxPodsConstraint expands an instance of NodePoolMaxPodsConstraint into a JSON +// request object. 
+func expandNodePoolMaxPodsConstraint(c *Client, f *NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MaxPodsPerNode; !dcl.IsEmptyValueIndirect(v) { + m["maxPodsPerNode"] = v + } + + return m, nil +} + +// flattenNodePoolMaxPodsConstraint flattens an instance of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraint(c *Client, i interface{}, res *NodePool) *NodePoolMaxPodsConstraint { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolMaxPodsConstraint{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolMaxPodsConstraint + } + r.MaxPodsPerNode = dcl.FlattenInteger(m["maxPodsPerNode"]) + + return r +} + +// expandNodePoolManagementMap expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementMap(c *Client, f map[string]NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolManagementSlice expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementSlice(c *Client, f []NodePoolManagement, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolManagementMap flattens the contents of NodePoolManagement from a JSON +// response object. 
+func flattenNodePoolManagementMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolManagement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolManagement{} + } + + if len(a) == 0 { + return map[string]NodePoolManagement{} + } + + items := make(map[string]NodePoolManagement) + for k, item := range a { + items[k] = *flattenNodePoolManagement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolManagementSlice flattens the contents of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagementSlice(c *Client, i interface{}, res *NodePool) []NodePoolManagement { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolManagement{} + } + + if len(a) == 0 { + return []NodePoolManagement{} + } + + items := make([]NodePoolManagement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolManagement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolManagement expands an instance of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagement(c *Client, f *NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AutoRepair; !dcl.IsEmptyValueIndirect(v) { + m["autoRepair"] = v + } + + return m, nil +} + +// flattenNodePoolManagement flattens an instance of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagement(c *Client, i interface{}, res *NodePool) *NodePoolManagement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolManagement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolManagement + } + r.AutoRepair = dcl.FlattenBool(m["autoRepair"]) + + return r +} + +// expandNodePoolKubeletConfigMap expands the contents of NodePoolKubeletConfig into a JSON +// request object. 
+func expandNodePoolKubeletConfigMap(c *Client, f map[string]NodePoolKubeletConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolKubeletConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolKubeletConfigSlice expands the contents of NodePoolKubeletConfig into a JSON +// request object. +func expandNodePoolKubeletConfigSlice(c *Client, f []NodePoolKubeletConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolKubeletConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolKubeletConfigMap flattens the contents of NodePoolKubeletConfig from a JSON +// response object. +func flattenNodePoolKubeletConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolKubeletConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolKubeletConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolKubeletConfig{} + } + + items := make(map[string]NodePoolKubeletConfig) + for k, item := range a { + items[k] = *flattenNodePoolKubeletConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolKubeletConfigSlice flattens the contents of NodePoolKubeletConfig from a JSON +// response object. 
+func flattenNodePoolKubeletConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolKubeletConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolKubeletConfig{} + } + + if len(a) == 0 { + return []NodePoolKubeletConfig{} + } + + items := make([]NodePoolKubeletConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolKubeletConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolKubeletConfig expands an instance of NodePoolKubeletConfig into a JSON +// request object. +func expandNodePoolKubeletConfig(c *Client, f *NodePoolKubeletConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.CpuManagerPolicy; !dcl.IsEmptyValueIndirect(v) { + m["cpuManagerPolicy"] = v + } + if v := f.CpuCfsQuota; !dcl.IsEmptyValueIndirect(v) { + m["cpuCfsQuota"] = v + } + if v := f.CpuCfsQuotaPeriod; !dcl.IsEmptyValueIndirect(v) { + m["cpuCfsQuotaPeriod"] = v + } + if v := f.PodPidsLimit; !dcl.IsEmptyValueIndirect(v) { + m["podPidsLimit"] = v + } + + return m, nil +} + +// flattenNodePoolKubeletConfig flattens an instance of NodePoolKubeletConfig from a JSON +// response object. +func flattenNodePoolKubeletConfig(c *Client, i interface{}, res *NodePool) *NodePoolKubeletConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolKubeletConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolKubeletConfig + } + r.CpuManagerPolicy = flattenNodePoolKubeletConfigCpuManagerPolicyEnum(m["cpuManagerPolicy"]) + r.CpuCfsQuota = dcl.FlattenBool(m["cpuCfsQuota"]) + r.CpuCfsQuotaPeriod = dcl.FlattenString(m["cpuCfsQuotaPeriod"]) + r.PodPidsLimit = dcl.FlattenInteger(m["podPidsLimit"]) + + return r +} + +// expandNodePoolUpdateSettingsMap expands the contents of NodePoolUpdateSettings into a JSON +// request object. 
+func expandNodePoolUpdateSettingsMap(c *Client, f map[string]NodePoolUpdateSettings, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolUpdateSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolUpdateSettingsSlice expands the contents of NodePoolUpdateSettings into a JSON +// request object. +func expandNodePoolUpdateSettingsSlice(c *Client, f []NodePoolUpdateSettings, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolUpdateSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolUpdateSettingsMap flattens the contents of NodePoolUpdateSettings from a JSON +// response object. +func flattenNodePoolUpdateSettingsMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolUpdateSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolUpdateSettings{} + } + + if len(a) == 0 { + return map[string]NodePoolUpdateSettings{} + } + + items := make(map[string]NodePoolUpdateSettings) + for k, item := range a { + items[k] = *flattenNodePoolUpdateSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolUpdateSettingsSlice flattens the contents of NodePoolUpdateSettings from a JSON +// response object. 
+func flattenNodePoolUpdateSettingsSlice(c *Client, i interface{}, res *NodePool) []NodePoolUpdateSettings { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolUpdateSettings{} + } + + if len(a) == 0 { + return []NodePoolUpdateSettings{} + } + + items := make([]NodePoolUpdateSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolUpdateSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolUpdateSettings expands an instance of NodePoolUpdateSettings into a JSON +// request object. +func expandNodePoolUpdateSettings(c *Client, f *NodePoolUpdateSettings, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandNodePoolUpdateSettingsSurgeSettings(c, f.SurgeSettings, res); err != nil { + return nil, fmt.Errorf("error expanding SurgeSettings into surgeSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["surgeSettings"] = v + } + + return m, nil +} + +// flattenNodePoolUpdateSettings flattens an instance of NodePoolUpdateSettings from a JSON +// response object. +func flattenNodePoolUpdateSettings(c *Client, i interface{}, res *NodePool) *NodePoolUpdateSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolUpdateSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolUpdateSettings + } + r.SurgeSettings = flattenNodePoolUpdateSettingsSurgeSettings(c, m["surgeSettings"], res) + + return r +} + +// expandNodePoolUpdateSettingsSurgeSettingsMap expands the contents of NodePoolUpdateSettingsSurgeSettings into a JSON +// request object. 
+func expandNodePoolUpdateSettingsSurgeSettingsMap(c *Client, f map[string]NodePoolUpdateSettingsSurgeSettings, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolUpdateSettingsSurgeSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolUpdateSettingsSurgeSettingsSlice expands the contents of NodePoolUpdateSettingsSurgeSettings into a JSON +// request object. +func expandNodePoolUpdateSettingsSurgeSettingsSlice(c *Client, f []NodePoolUpdateSettingsSurgeSettings, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolUpdateSettingsSurgeSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolUpdateSettingsSurgeSettingsMap flattens the contents of NodePoolUpdateSettingsSurgeSettings from a JSON +// response object. +func flattenNodePoolUpdateSettingsSurgeSettingsMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolUpdateSettingsSurgeSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolUpdateSettingsSurgeSettings{} + } + + if len(a) == 0 { + return map[string]NodePoolUpdateSettingsSurgeSettings{} + } + + items := make(map[string]NodePoolUpdateSettingsSurgeSettings) + for k, item := range a { + items[k] = *flattenNodePoolUpdateSettingsSurgeSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolUpdateSettingsSurgeSettingsSlice flattens the contents of NodePoolUpdateSettingsSurgeSettings from a JSON +// response object. 
+func flattenNodePoolUpdateSettingsSurgeSettingsSlice(c *Client, i interface{}, res *NodePool) []NodePoolUpdateSettingsSurgeSettings { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolUpdateSettingsSurgeSettings{} + } + + if len(a) == 0 { + return []NodePoolUpdateSettingsSurgeSettings{} + } + + items := make([]NodePoolUpdateSettingsSurgeSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolUpdateSettingsSurgeSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolUpdateSettingsSurgeSettings expands an instance of NodePoolUpdateSettingsSurgeSettings into a JSON +// request object. +func expandNodePoolUpdateSettingsSurgeSettings(c *Client, f *NodePoolUpdateSettingsSurgeSettings, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MaxSurge; !dcl.IsEmptyValueIndirect(v) { + m["maxSurge"] = v + } + if v := f.MaxUnavailable; !dcl.IsEmptyValueIndirect(v) { + m["maxUnavailable"] = v + } + + return m, nil +} + +// flattenNodePoolUpdateSettingsSurgeSettings flattens an instance of NodePoolUpdateSettingsSurgeSettings from a JSON +// response object. +func flattenNodePoolUpdateSettingsSurgeSettings(c *Client, i interface{}, res *NodePool) *NodePoolUpdateSettingsSurgeSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolUpdateSettingsSurgeSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolUpdateSettingsSurgeSettings + } + r.MaxSurge = dcl.FlattenInteger(m["maxSurge"]) + r.MaxUnavailable = dcl.FlattenInteger(m["maxUnavailable"]) + + return r +} + +// flattenNodePoolConfigRootVolumeVolumeTypeEnumMap flattens the contents of NodePoolConfigRootVolumeVolumeTypeEnum from a JSON +// response object. 
+func flattenNodePoolConfigRootVolumeVolumeTypeEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigRootVolumeVolumeTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + items := make(map[string]NodePoolConfigRootVolumeVolumeTypeEnum) + for k, item := range a { + items[k] = *flattenNodePoolConfigRootVolumeVolumeTypeEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolConfigRootVolumeVolumeTypeEnumSlice flattens the contents of NodePoolConfigRootVolumeVolumeTypeEnum from a JSON +// response object. +func flattenNodePoolConfigRootVolumeVolumeTypeEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigRootVolumeVolumeTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return []NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + items := make([]NodePoolConfigRootVolumeVolumeTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigRootVolumeVolumeTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolConfigRootVolumeVolumeTypeEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolConfigRootVolumeVolumeTypeEnum with the same value as that string. +func flattenNodePoolConfigRootVolumeVolumeTypeEnum(i interface{}) *NodePoolConfigRootVolumeVolumeTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolConfigRootVolumeVolumeTypeEnumRef(s) +} + +// flattenNodePoolConfigTaintsEffectEnumMap flattens the contents of NodePoolConfigTaintsEffectEnum from a JSON +// response object. 
+func flattenNodePoolConfigTaintsEffectEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigTaintsEffectEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigTaintsEffectEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigTaintsEffectEnum{} + } + + items := make(map[string]NodePoolConfigTaintsEffectEnum) + for k, item := range a { + items[k] = *flattenNodePoolConfigTaintsEffectEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolConfigTaintsEffectEnumSlice flattens the contents of NodePoolConfigTaintsEffectEnum from a JSON +// response object. +func flattenNodePoolConfigTaintsEffectEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigTaintsEffectEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigTaintsEffectEnum{} + } + + if len(a) == 0 { + return []NodePoolConfigTaintsEffectEnum{} + } + + items := make([]NodePoolConfigTaintsEffectEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigTaintsEffectEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolConfigTaintsEffectEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolConfigTaintsEffectEnum with the same value as that string. +func flattenNodePoolConfigTaintsEffectEnum(i interface{}) *NodePoolConfigTaintsEffectEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolConfigTaintsEffectEnumRef(s) +{{- if ne $.TargetVersionName "ga" }} +} + +// flattenNodePoolConfigInstancePlacementTenancyEnumMap flattens the contents of NodePoolConfigInstancePlacementTenancyEnum from a JSON +// response object. 
+func flattenNodePoolConfigInstancePlacementTenancyEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigInstancePlacementTenancyEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigInstancePlacementTenancyEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigInstancePlacementTenancyEnum{} + } + + items := make(map[string]NodePoolConfigInstancePlacementTenancyEnum) + for k, item := range a { + items[k] = *flattenNodePoolConfigInstancePlacementTenancyEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolConfigInstancePlacementTenancyEnumSlice flattens the contents of NodePoolConfigInstancePlacementTenancyEnum from a JSON +// response object. +func flattenNodePoolConfigInstancePlacementTenancyEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigInstancePlacementTenancyEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigInstancePlacementTenancyEnum{} + } + + if len(a) == 0 { + return []NodePoolConfigInstancePlacementTenancyEnum{} + } + + items := make([]NodePoolConfigInstancePlacementTenancyEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigInstancePlacementTenancyEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolConfigInstancePlacementTenancyEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolConfigInstancePlacementTenancyEnum with the same value as that string. +func flattenNodePoolConfigInstancePlacementTenancyEnum(i interface{}) *NodePoolConfigInstancePlacementTenancyEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolConfigInstancePlacementTenancyEnumRef(s) +{{- end }} +} + +// flattenNodePoolStateEnumMap flattens the contents of NodePoolStateEnum from a JSON +// response object. 
+func flattenNodePoolStateEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolStateEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolStateEnum{} + } + + items := make(map[string]NodePoolStateEnum) + for k, item := range a { + items[k] = *flattenNodePoolStateEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolStateEnumSlice flattens the contents of NodePoolStateEnum from a JSON +// response object. +func flattenNodePoolStateEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolStateEnum{} + } + + if len(a) == 0 { + return []NodePoolStateEnum{} + } + + items := make([]NodePoolStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolStateEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolStateEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolStateEnum with the same value as that string. +func flattenNodePoolStateEnum(i interface{}) *NodePoolStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolStateEnumRef(s) +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnumMap flattens the contents of NodePoolKubeletConfigCpuManagerPolicyEnum from a JSON +// response object. 
+func flattenNodePoolKubeletConfigCpuManagerPolicyEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolKubeletConfigCpuManagerPolicyEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + items := make(map[string]NodePoolKubeletConfigCpuManagerPolicyEnum) + for k, item := range a { + items[k] = *flattenNodePoolKubeletConfigCpuManagerPolicyEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnumSlice flattens the contents of NodePoolKubeletConfigCpuManagerPolicyEnum from a JSON +// response object. +func flattenNodePoolKubeletConfigCpuManagerPolicyEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolKubeletConfigCpuManagerPolicyEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + if len(a) == 0 { + return []NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + items := make([]NodePoolKubeletConfigCpuManagerPolicyEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolKubeletConfigCpuManagerPolicyEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolKubeletConfigCpuManagerPolicyEnum with the same value as that string. +func flattenNodePoolKubeletConfigCpuManagerPolicyEnum(i interface{}) *NodePoolKubeletConfigCpuManagerPolicyEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolKubeletConfigCpuManagerPolicyEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *NodePool) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalNodePool(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Cluster == nil && ncr.Cluster == nil { + c.Config.Logger.Info("Both Cluster fields null - considering equal.") + } else if nr.Cluster == nil || ncr.Cluster == nil { + c.Config.Logger.Info("Only one Cluster field is null - considering unequal.") + return false + } else if *nr.Cluster != *ncr.Cluster { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type nodePoolDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp nodePoolApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToNodePoolDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]nodePoolDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []nodePoolDiff + // For each operation name, create a nodePoolDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := nodePoolDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToNodePoolApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToNodePoolApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (nodePoolApiOperation, error) { + switch opName { + + case "updateNodePoolUpdateAwsNodePoolOperation": + return &updateNodePoolUpdateAwsNodePoolOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. 
+ vConfig = &NodePoolConfig{} + } + if err := extractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &NodePoolAutoscaling{} + } + if err := extractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. + vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := extractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := extractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + vKubeletConfig := r.KubeletConfig + if vKubeletConfig == nil { + // note: explicitly not the empty object. + vKubeletConfig = &NodePoolKubeletConfig{} + } + if err := extractNodePoolKubeletConfigFields(r, vKubeletConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubeletConfig) { + r.KubeletConfig = vKubeletConfig + } + vUpdateSettings := r.UpdateSettings + if vUpdateSettings == nil { + // note: explicitly not the empty object. 
+ vUpdateSettings = &NodePoolUpdateSettings{} + } + if err := extractNodePoolUpdateSettingsFields(r, vUpdateSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vUpdateSettings) { + r.UpdateSettings = vUpdateSettings + } + return nil +} +func extractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. + vConfigEncryption = &NodePoolConfigConfigEncryption{} + } + if err := extractNodePoolConfigConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } +{{- if ne $.TargetVersionName "ga" }} + vSpotConfig := o.SpotConfig + if vSpotConfig == nil { + // note: explicitly not the empty object. + vSpotConfig = &NodePoolConfigSpotConfig{} + } + if err := extractNodePoolConfigSpotConfigFields(r, vSpotConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSpotConfig) { + o.SpotConfig = vSpotConfig + } +{{- end }} + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. 
+ vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. + vInstancePlacement = &NodePoolConfigInstancePlacement{} + } + if err := extractNodePoolConfigInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + vAutoscalingMetricsCollection := o.AutoscalingMetricsCollection + if vAutoscalingMetricsCollection == nil { + // note: explicitly not the empty object. + vAutoscalingMetricsCollection = &NodePoolConfigAutoscalingMetricsCollection{} + } + if err := extractNodePoolConfigAutoscalingMetricsCollectionFields(r, vAutoscalingMetricsCollection); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingMetricsCollection) { + o.AutoscalingMetricsCollection = vAutoscalingMetricsCollection + } + return nil +} +func extractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func extractNodePoolConfigTaintsFields(r *NodePool, o *NodePoolConfigTaints) error { + return nil +} +func extractNodePoolConfigConfigEncryptionFields(r *NodePool, o *NodePoolConfigConfigEncryption) error { + return nil +} +func extractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func extractNodePoolConfigSpotConfigFields(r *NodePool, o *NodePoolConfigSpotConfig) error { + return nil +} +{{- end }} +func extractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func 
extractNodePoolConfigInstancePlacementFields(r *NodePool, o *NodePoolConfigInstancePlacement) error { +{{- end }} + return nil +} +func extractNodePoolConfigAutoscalingMetricsCollectionFields(r *NodePool, o *NodePoolConfigAutoscalingMetricsCollection) error { + return nil +} +func extractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + return nil +} +func extractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func extractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} +func extractNodePoolKubeletConfigFields(r *NodePool, o *NodePoolKubeletConfig) error { + return nil +} +func extractNodePoolUpdateSettingsFields(r *NodePool, o *NodePoolUpdateSettings) error { + vSurgeSettings := o.SurgeSettings + if vSurgeSettings == nil { + // note: explicitly not the empty object. + vSurgeSettings = &NodePoolUpdateSettingsSurgeSettings{} + } + if err := extractNodePoolUpdateSettingsSurgeSettingsFields(r, vSurgeSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSurgeSettings) { + o.SurgeSettings = vSurgeSettings + } + return nil +} +func extractNodePoolUpdateSettingsSurgeSettingsFields(r *NodePool, o *NodePoolUpdateSettingsSurgeSettings) error { + return nil +} + +func postReadExtractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &NodePoolConfig{} + } + if err := postReadExtractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. 
+ vAutoscaling = &NodePoolAutoscaling{} + } + if err := postReadExtractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. + vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := postReadExtractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := postReadExtractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + vKubeletConfig := r.KubeletConfig + if vKubeletConfig == nil { + // note: explicitly not the empty object. + vKubeletConfig = &NodePoolKubeletConfig{} + } + if err := postReadExtractNodePoolKubeletConfigFields(r, vKubeletConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubeletConfig) { + r.KubeletConfig = vKubeletConfig + } + vUpdateSettings := r.UpdateSettings + if vUpdateSettings == nil { + // note: explicitly not the empty object. + vUpdateSettings = &NodePoolUpdateSettings{} + } + if err := postReadExtractNodePoolUpdateSettingsFields(r, vUpdateSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vUpdateSettings) { + r.UpdateSettings = vUpdateSettings + } + return nil +} +func postReadExtractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. 
+ vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. + vConfigEncryption = &NodePoolConfigConfigEncryption{} + } + if err := extractNodePoolConfigConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } +{{- if ne $.TargetVersionName "ga" }} + vSpotConfig := o.SpotConfig + if vSpotConfig == nil { + // note: explicitly not the empty object. + vSpotConfig = &NodePoolConfigSpotConfig{} + } + if err := extractNodePoolConfigSpotConfigFields(r, vSpotConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSpotConfig) { + o.SpotConfig = vSpotConfig + } +{{- end }} + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. + vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. 
+ vInstancePlacement = &NodePoolConfigInstancePlacement{} + } + if err := extractNodePoolConfigInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + vAutoscalingMetricsCollection := o.AutoscalingMetricsCollection + if vAutoscalingMetricsCollection == nil { + // note: explicitly not the empty object. + vAutoscalingMetricsCollection = &NodePoolConfigAutoscalingMetricsCollection{} + } + if err := extractNodePoolConfigAutoscalingMetricsCollectionFields(r, vAutoscalingMetricsCollection); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingMetricsCollection) { + o.AutoscalingMetricsCollection = vAutoscalingMetricsCollection + } + return nil +} +func postReadExtractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func postReadExtractNodePoolConfigTaintsFields(r *NodePool, o *NodePoolConfigTaints) error { + return nil +} +func postReadExtractNodePoolConfigConfigEncryptionFields(r *NodePool, o *NodePoolConfigConfigEncryption) error { + return nil +} +func postReadExtractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractNodePoolConfigSpotConfigFields(r *NodePool, o *NodePoolConfigSpotConfig) error { + return nil +} +{{- end }} +func postReadExtractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func postReadExtractNodePoolConfigInstancePlacementFields(r *NodePool, o *NodePoolConfigInstancePlacement) error { +{{- end }} + return nil +} +func postReadExtractNodePoolConfigAutoscalingMetricsCollectionFields(r *NodePool, o *NodePoolConfigAutoscalingMetricsCollection) error { + return nil +} +func postReadExtractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + 
return nil +} +func postReadExtractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func postReadExtractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} +func postReadExtractNodePoolKubeletConfigFields(r *NodePool, o *NodePoolKubeletConfig) error { + return nil +} +func postReadExtractNodePoolUpdateSettingsFields(r *NodePool, o *NodePoolUpdateSettings) error { + vSurgeSettings := o.SurgeSettings + if vSurgeSettings == nil { + // note: explicitly not the empty object. + vSurgeSettings = &NodePoolUpdateSettingsSurgeSettings{} + } + if err := extractNodePoolUpdateSettingsSurgeSettingsFields(r, vSurgeSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSurgeSettings) { + o.SurgeSettings = vSurgeSettings + } + return nil +} +func postReadExtractNodePoolUpdateSettingsSurgeSettingsFields(r *NodePool, o *NodePoolUpdateSettingsSurgeSettings) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/containeraws/provider_dcl_client_creation.go new file mode 100644 index 000000000000..699256d6e026 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package containeraws + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLContainerAwsClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ContainerAwsBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions 
= append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl new file mode 100644 index 000000000000..fe0732db0d9d --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl @@ -0,0 +1,1571 @@ +package containeraws + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAwsCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAwsClusterCreate, + Read: resourceContainerAwsClusterRead, + Update: resourceContainerAwsClusterUpdate, + Delete: resourceContainerAwsClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAwsClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "authorization": { + Type: schema.TypeList, + Required: true, + Description: "Configuration related to the cluster RBAC settings.", + MaxItems: 1, + Elem: ContainerAwsClusterAuthorizationSchema(), + }, + + "aws_region": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + Description: "The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. You can call to list all supported AWS regions within a given Google Cloud region.", + }, + + "control_plane": { + Type: schema.TypeList, + Required: true, + Description: "Configuration related to the cluster control plane.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneSchema(), + }, + + "fleet": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Fleet configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterFleetSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "networking": { + Type: schema.TypeList, + Required: true, + Description: "Cluster-wide networking configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterNetworkingSchema(), + }, + + "binary_authorization": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration options for the Binary Authorization feature.", + MaxItems: 1, + Elem: ContainerAwsClusterBinaryAuthorizationSchema(), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. A human readable description of this cluster. 
Cannot be longer than 255 UTF-8 encoded bytes.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", +{{- if ne $.TargetVersionName "ga" }} + }, + + "logging_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Logging configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterLoggingConfigSchema(), +{{- end }} + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was created.", + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The endpoint of the cluster's API server.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the cluster.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was last updated.", + }, + + "workload_identity_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Workload Identity settings.", + Elem: ContainerAwsClusterWorkloadIdentityConfigSchema(), + }, + }, + } +} + +func ContainerAwsClusterAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_users": { + Type: schema.TypeList, + Required: true, + Description: "Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAwsClusterAuthorizationAdminUsersSchema(), + }, + + "admin_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Groups of users that can perform operations as a cluster admin. 
A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAwsClusterAuthorizationAdminGroupsSchema(), + }, + }, + } +} + +func ContainerAwsClusterAuthorizationAdminUsersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + Description: "The name of the user, e.g. `my-gcp-id@gmail.com`.", + }, + }, + } +} + +func ContainerAwsClusterAuthorizationAdminGroupsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + Description: "The name of the group, e.g. `my-group@domain.com`.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aws_services_authentication": { + Type: schema.TypeList, + Required: true, + Description: "Authentication configuration for management of AWS resources.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneAwsServicesAuthenticationSchema(), + }, + + "config_encryption": { + Type: schema.TypeList, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneConfigEncryptionSchema(), + }, + + "database_encryption": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneDatabaseEncryptionSchema(), + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Required: true, + Description: "The name of the AWS IAM instance pofile to assign to each control plane replica.", + }, + + "subnet_ids": { + Type: 
schema.TypeList, + Required: true, + ForceNew: true, + Description: "The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling .", + }, + +{{- if ne $.TargetVersionName "ga" }} + "instance_placement": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Details of placement information for an instance.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneInstancePlacementSchema(), + }, + +{{- end }} + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", + }, + + "main_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneMainVolumeSchema(), + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneProxyConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. 
Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneRootVolumeSchema(), + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ssh_config": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. SSH configuration for how to access the underlying control plane machines.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneSshConfigSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ContainerAwsClusterControlPlaneAwsServicesAuthenticationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + Description: "The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account.", + }, + + "role_session_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. An identifier for the assumed role session. 
When unspecified, it defaults to `multicloud-service-agent`.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneConfigEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneDatabaseEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsClusterControlPlaneInstancePlacementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tenancy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", +{{- end }} + }, + }, + } +} + +func ContainerAwsClusterControlPlaneMainVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. 
The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + + "secret_version": { + Type: schema.TypeString, + Required: true, + Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. 
If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ec2_key_pair": { + Type: schema.TypeString, + Required: true, + Description: "The name of the EC2 key pair used to login into cluster machines.", + }, + }, + } +} + +func ContainerAwsClusterFleetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The number of the Fleet host project where this cluster will be registered.", + }, + + "membership": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. 
Membership names are formatted as projects//locations/global/membership/.", + }, + }, + } +} + +func ContainerAwsClusterNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation.", + }, + + "per_node_pool_sg_rules_disabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. Failure to do so may result in unavailable node pools.", + }, + }, + } +} + +func ContainerAwsClusterBinaryAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "evaluation_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Mode of operation for Binary Authorization policy evaluation. 
Possible values: DISABLED, PROJECT_SINGLETON_POLICY_ENFORCE", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsClusterLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration of the logging components.", + MaxItems: 1, + Elem: ContainerAwsClusterLoggingConfigComponentConfigSchema(), + }, + }, + } +} + +func ContainerAwsClusterLoggingConfigComponentConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Components of the logging configuration to be enabled.", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- end }} + }, + }, + } +} + +func ContainerAwsClusterWorkloadIdentityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identity_provider": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + }, + + "issuer_uri": { + Type: schema.TypeString, + Computed: true, + Description: "The OIDC issuer URL for this cluster.", + }, + + "workload_pool": { + Type: schema.TypeString, + Computed: true, + Description: "The Workload Identity Pool associated to the cluster.", + }, + }, + } +} + +func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: 
expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAwsClusterRead(d, meta) +} + +func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return 
fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetCluster(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAwsCluster %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("authorization", flattenContainerAwsClusterAuthorization(res.Authorization)); err != nil { + return fmt.Errorf("error setting authorization in state: %s", err) + } + if err = d.Set("aws_region", res.AwsRegion); err != nil { + return fmt.Errorf("error setting aws_region in state: %s", err) + } + if err = d.Set("control_plane", flattenContainerAwsClusterControlPlane(res.ControlPlane)); err != nil { + return fmt.Errorf("error setting control_plane in state: %s", err) + } + if err = d.Set("fleet", flattenContainerAwsClusterFleet(res.Fleet)); err != nil { + return fmt.Errorf("error setting fleet in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("networking", flattenContainerAwsClusterNetworking(res.Networking)); err != nil { + return fmt.Errorf("error setting networking in state: %s", err) + } + if err = d.Set("binary_authorization", flattenContainerAwsClusterBinaryAuthorization(res.BinaryAuthorization)); err != nil { + return fmt.Errorf("error setting binary_authorization in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) +{{- if ne $.TargetVersionName "ga" }} + } + if err = d.Set("logging_config", flattenContainerAwsClusterLoggingConfig(res.LoggingConfig)); err != 
nil { + return fmt.Errorf("error setting logging_config in state: %s", err) +{{- end }} + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("annotations", flattenContainerAwsClusterAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("endpoint", res.Endpoint); err != nil { + return fmt.Errorf("error setting endpoint in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + if err = d.Set("workload_identity_config", flattenContainerAwsClusterWorkloadIdentityConfig(res.WorkloadIdentityConfig)); err != nil { + return fmt.Errorf("error setting workload_identity_config in state: %s", err) + } + + return nil +} +func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: 
expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + directive := dcl.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAwsClusterRead(d, meta) +} + +func resourceContainerAwsClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, 
client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteCluster(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Cluster %q", d.Id()) + return nil +} + +func resourceContainerAwsClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/awsClusters/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAwsClusterAuthorization(o interface{}) *ClusterAuthorization { + if o == nil { + return EmptyClusterAuthorization + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterAuthorization + } + obj := objArr[0].(map[string]interface{}) + return &ClusterAuthorization{ + AdminUsers: expandContainerAwsClusterAuthorizationAdminUsersArray(obj["admin_users"]), + AdminGroups: expandContainerAwsClusterAuthorizationAdminGroupsArray(obj["admin_groups"]), + } +} + +func flattenContainerAwsClusterAuthorization(obj *ClusterAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "admin_users": flattenContainerAwsClusterAuthorizationAdminUsersArray(obj.AdminUsers), + "admin_groups": 
flattenContainerAwsClusterAuthorizationAdminGroupsArray(obj.AdminGroups), + } + + return []interface{}{transformed} + +} +func expandContainerAwsClusterAuthorizationAdminUsersArray(o interface{}) []ClusterAuthorizationAdminUsers { + if o == nil { + return make([]ClusterAuthorizationAdminUsers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]ClusterAuthorizationAdminUsers, 0) + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(objs)) + for _, item := range objs { + i := expandContainerAwsClusterAuthorizationAdminUsers(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsClusterAuthorizationAdminUsers(o interface{}) *ClusterAuthorizationAdminUsers { + if o == nil { + return EmptyClusterAuthorizationAdminUsers + } + + obj := o.(map[string]interface{}) + return &ClusterAuthorizationAdminUsers{ + Username: dcl.String(obj["username"].(string)), + } +} + +func flattenContainerAwsClusterAuthorizationAdminUsersArray(objs []ClusterAuthorizationAdminUsers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsClusterAuthorizationAdminUsers(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsClusterAuthorizationAdminUsers(obj *ClusterAuthorizationAdminUsers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "username": obj.Username, + } + + return transformed + +} +func expandContainerAwsClusterAuthorizationAdminGroupsArray(o interface{}) []ClusterAuthorizationAdminGroups { + if o == nil { + return make([]ClusterAuthorizationAdminGroups, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]ClusterAuthorizationAdminGroups, 0) + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(objs)) + for _, item := range objs { + i := 
expandContainerAwsClusterAuthorizationAdminGroups(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsClusterAuthorizationAdminGroups(o interface{}) *ClusterAuthorizationAdminGroups { + if o == nil { + return EmptyClusterAuthorizationAdminGroups + } + + obj := o.(map[string]interface{}) + return &ClusterAuthorizationAdminGroups{ + Group: dcl.String(obj["group"].(string)), + } +} + +func flattenContainerAwsClusterAuthorizationAdminGroupsArray(objs []ClusterAuthorizationAdminGroups) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsClusterAuthorizationAdminGroups(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsClusterAuthorizationAdminGroups(obj *ClusterAuthorizationAdminGroups) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "group": obj.Group, + } + + return transformed + +} + +func expandContainerAwsClusterControlPlane(o interface{}) *ClusterControlPlane { + if o == nil { + return EmptyClusterControlPlane + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlane + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlane{ + AwsServicesAuthentication: expandContainerAwsClusterControlPlaneAwsServicesAuthentication(obj["aws_services_authentication"]), + ConfigEncryption: expandContainerAwsClusterControlPlaneConfigEncryption(obj["config_encryption"]), + DatabaseEncryption: expandContainerAwsClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), + IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), + SubnetIds: dcl.ExpandStringArray(obj["subnet_ids"]), + Version: dcl.String(obj["version"].(string)), +{{- if ne $.TargetVersionName "ga" }} + InstancePlacement: expandContainerAwsClusterControlPlaneInstancePlacement(obj["instance_placement"]), +{{- end }} + 
InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), + MainVolume: expandContainerAwsClusterControlPlaneMainVolume(obj["main_volume"]), + ProxyConfig: expandContainerAwsClusterControlPlaneProxyConfig(obj["proxy_config"]), + RootVolume: expandContainerAwsClusterControlPlaneRootVolume(obj["root_volume"]), + SecurityGroupIds: dcl.ExpandStringArray(obj["security_group_ids"]), + SshConfig: expandContainerAwsClusterControlPlaneSshConfig(obj["ssh_config"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + } +} + +func flattenContainerAwsClusterControlPlane(obj *ClusterControlPlane) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "aws_services_authentication": flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj.AwsServicesAuthentication), + "config_encryption": flattenContainerAwsClusterControlPlaneConfigEncryption(obj.ConfigEncryption), + "database_encryption": flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj.DatabaseEncryption), + "iam_instance_profile": obj.IamInstanceProfile, + "subnet_ids": obj.SubnetIds, + "version": obj.Version, +{{- if ne $.TargetVersionName "ga" }} + "instance_placement": flattenContainerAwsClusterControlPlaneInstancePlacement(obj.InstancePlacement), +{{- end }} + "instance_type": obj.InstanceType, + "main_volume": flattenContainerAwsClusterControlPlaneMainVolume(obj.MainVolume), + "proxy_config": flattenContainerAwsClusterControlPlaneProxyConfig(obj.ProxyConfig), + "root_volume": flattenContainerAwsClusterControlPlaneRootVolume(obj.RootVolume), + "security_group_ids": obj.SecurityGroupIds, + "ssh_config": flattenContainerAwsClusterControlPlaneSshConfig(obj.SshConfig), + "tags": obj.Tags, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneAwsServicesAuthentication(o interface{}) *ClusterControlPlaneAwsServicesAuthentication { + if o == nil { + return EmptyClusterControlPlaneAwsServicesAuthentication + } + 
objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneAwsServicesAuthentication + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneAwsServicesAuthentication{ + RoleArn: dcl.String(obj["role_arn"].(string)), + RoleSessionName: dcl.StringOrNil(obj["role_session_name"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj *ClusterControlPlaneAwsServicesAuthentication) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "role_arn": obj.RoleArn, + "role_session_name": obj.RoleSessionName, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneConfigEncryption(o interface{}) *ClusterControlPlaneConfigEncryption { + if o == nil { + return EmptyClusterControlPlaneConfigEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneConfigEncryption + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneConfigEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneConfigEncryption(obj *ClusterControlPlaneConfigEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneDatabaseEncryption(o interface{}) *ClusterControlPlaneDatabaseEncryption { + if o == nil { + return EmptyClusterControlPlaneDatabaseEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneDatabaseEncryption + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneDatabaseEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func 
flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj *ClusterControlPlaneDatabaseEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAwsClusterControlPlaneInstancePlacement(o interface{}) *ClusterControlPlaneInstancePlacement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneInstancePlacement{ + Tenancy: ClusterControlPlaneInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneInstancePlacement(obj *ClusterControlPlaneInstancePlacement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "tenancy": obj.Tenancy, + } + + return []interface{}{transformed} + +} + +{{- end }} +func expandContainerAwsClusterControlPlaneMainVolume(o interface{}) *ClusterControlPlaneMainVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneMainVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), + VolumeType: ClusterControlPlaneMainVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneMainVolume(obj *ClusterControlPlaneMainVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "throughput": obj.Throughput, + 
"volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneProxyConfig(o interface{}) *ClusterControlPlaneProxyConfig { + if o == nil { + return EmptyClusterControlPlaneProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneProxyConfig{ + SecretArn: dcl.String(obj["secret_arn"].(string)), + SecretVersion: dcl.String(obj["secret_version"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneProxyConfig(obj *ClusterControlPlaneProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "secret_arn": obj.SecretArn, + "secret_version": obj.SecretVersion, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneRootVolume(o interface{}) *ClusterControlPlaneRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneRootVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), + VolumeType: ClusterControlPlaneRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneRootVolume(obj *ClusterControlPlaneRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "throughput": obj.Throughput, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneSshConfig(o interface{}) 
*ClusterControlPlaneSshConfig { + if o == nil { + return EmptyClusterControlPlaneSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneSshConfig{ + Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneSshConfig(obj *ClusterControlPlaneSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ec2_key_pair": obj.Ec2KeyPair, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterFleet(o interface{}) *ClusterFleet { + if o == nil { + return EmptyClusterFleet + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterFleet + } + obj := objArr[0].(map[string]interface{}) + return &ClusterFleet{ + Project: dcl.StringOrNil(obj["project"].(string)), + } +} + +func flattenContainerAwsClusterFleet(obj *ClusterFleet) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "project": obj.Project, + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterNetworking(o interface{}) *ClusterNetworking { + if o == nil { + return EmptyClusterNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterNetworking + } + obj := objArr[0].(map[string]interface{}) + return &ClusterNetworking{ + PodAddressCidrBlocks: dcl.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: dcl.ExpandStringArray(obj["service_address_cidr_blocks"]), + VPCId: dcl.String(obj["vpc_id"].(string)), + PerNodePoolSgRulesDisabled: dcl.Bool(obj["per_node_pool_sg_rules_disabled"].(bool)), + } +} + +func flattenContainerAwsClusterNetworking(obj *ClusterNetworking) interface{} { + if obj == nil || obj.Empty() { + 
return nil + } + transformed := map[string]interface{}{ + "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, + "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, + "vpc_id": obj.VPCId, + "per_node_pool_sg_rules_disabled": obj.PerNodePoolSgRulesDisabled, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterBinaryAuthorization(o interface{}) *ClusterBinaryAuthorization { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterBinaryAuthorization{ + EvaluationMode: ClusterBinaryAuthorizationEvaluationModeEnumRef(obj["evaluation_mode"].(string)), + } +} + +func flattenContainerAwsClusterBinaryAuthorization(obj *ClusterBinaryAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "evaluation_mode": obj.EvaluationMode, + } + + return []interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAwsClusterLoggingConfig(o interface{}) *ClusterLoggingConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterLoggingConfig{ + ComponentConfig: expandContainerAwsClusterLoggingConfigComponentConfig(obj["component_config"]), + } +} + +func flattenContainerAwsClusterLoggingConfig(obj *ClusterLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "component_config": flattenContainerAwsClusterLoggingConfigComponentConfig(obj.ComponentConfig), + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterLoggingConfigComponentConfig(o interface{}) *ClusterLoggingConfigComponentConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + 
} + obj := objArr[0].(map[string]interface{}) + return &ClusterLoggingConfigComponentConfig{ + EnableComponents: expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj["enable_components"]), + } +} + +func flattenContainerAwsClusterLoggingConfigComponentConfig(obj *ClusterLoggingConfigComponentConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_components": flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj.EnableComponents), + } + + return []interface{}{transformed} + +} + +{{- end }} +func flattenContainerAwsClusterWorkloadIdentityConfig(obj *ClusterWorkloadIdentityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "identity_provider": obj.IdentityProvider, + "issuer_uri": obj.IssuerUri, + "workload_pool": obj.WorkloadPool, + } + + return []interface{}{transformed} + +} + +func flattenContainerAwsClusterAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} +{{- if ne $.TargetVersionName "ga" }} + +func flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj []ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []ClusterLoggingConfigComponentConfigEnableComponentsEnum { + objs := o.([]interface{}) + items := make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) + for _, item := range objs { + i := 
ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl new file mode 100644 index 000000000000..d084a72d39a0 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl @@ -0,0 +1,1021 @@ +package containeraws_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccContainerAwsCluster_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerAwsCluster_BasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsCluster_BasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- if ne 
$.TargetVersionName "ga" }} +func TestAccContainerAwsCluster_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsCluster_BetaBasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + 
"project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BetaBasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BetaBasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAwsCluster_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + admin_groups { + group = "group@domain.com" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = 
"${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsCluster_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + admin_groups { + group = "group@domain.com" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + 
iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsCluster_BasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsCluster_BasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" +{{- if ne $.TargetVersionName "ga" }} +} + + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = 
"arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + + instance_placement { + tenancy = "DEDICATED" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + } + +} + + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + 
location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "updated-%{service_acct}" + } + + instance_placement { + tenancy = "DEDICATED" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", 
"WORKLOADS"] + } + } + +} + + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + + instance_placement { + tenancy = "dedicated" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + 
service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +} + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + binary_authorization { + evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" + } + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = 
["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "updated-%{service_acct}" + } + + instance_placement { + tenancy = "dedicated" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +{{- end }} +} + + +`, context) +} + +func testAccCheckContainerAwsClusterDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_aws_cluster" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containeraws.Cluster{ + AwsRegion: dcl.String(rs.Primary.Attributes["aws_region"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Endpoint: dcl.StringOrNil(rs.Primary.Attributes["endpoint"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containeraws.ClusterStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := 
containeraws.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetCluster(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_aws_cluster still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl index 49005a50735c..f25424849a69 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_container_aws_cluster' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AwsCluster' diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl new file mode 100644 index 000000000000..7704fe0b4950 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl @@ -0,0 +1,1379 @@ +package containeraws + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAwsNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAwsNodePoolCreate, + Read: resourceContainerAwsNodePoolRead, + Update: resourceContainerAwsNodePoolUpdate, + Delete: 
resourceContainerAwsNodePoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAwsNodePoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, +{{- if ne $.TargetVersionName "ga" }} + dcl.ResourceContainerAwsNodePoolCustomizeDiffFunc, +{{- end }} + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "autoscaling": { + Type: schema.TypeList, + Required: true, + Description: "Autoscaler configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolAutoscalingSchema(), + }, + + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The awsCluster for the resource", + }, + + "config": { + Type: schema.TypeList, + Required: true, + Description: "The configuration of the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "max_pods_constraint": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolMaxPodsConstraintSchema(), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The subnet where the node pool node run.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). 
You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "kubelet_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The kubelet configuration for the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolKubeletConfigSchema(), + }, + + "management": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "The Management configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolManagementSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "update_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Update settings control the speed and disruption of the node pool update.", + MaxItems: 1, + Elem: ContainerAwsNodePoolUpdateSettingsSchema(), + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. 
Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the node pool.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The lifecycle state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the node pool.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was last updated.", + }, + }, + } +} + +func ContainerAwsNodePoolAutoscalingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Maximum number of nodes in the NodePool. Must be >= min_node_count.", + }, + + "min_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Minimum number of nodes in the NodePool. 
Must be >= 1 and <= max_node_count.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_encryption": { + Type: schema.TypeList, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigConfigEncryptionSchema(), + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Required: true, + Description: "The name of the AWS IAM role assigned to nodes in the pool.", + }, + + "autoscaling_metrics_collection": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigAutoscalingMetricsCollectionSchema(), + }, + +{{- if ne $.TargetVersionName "ga" }} + "image_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The OS image type to use on node pool instances.", + }, + + "instance_placement": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Details of placement information for an instance.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigInstancePlacementSchema(), + }, + +{{- end }} + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. 
Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigProxyConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigRootVolumeSchema(), + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + +{{- if ne $.TargetVersionName "ga" }} + "spot_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. This field is mutually exclusive with `instance_type`", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSpotConfigSchema(), + }, + +{{- end }} + "ssh_config": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The SSH configuration.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSshConfigSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. 
Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "taints": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The initial taints assigned to nodes of this node pool.", + Elem: ContainerAwsNodePoolConfigTaintsSchema(), + }, + }, + } +} + +func ContainerAwsNodePoolConfigConfigEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigAutoscalingMetricsCollectionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "granularity": { + Type: schema.TypeString, + Required: true, + Description: "The frequency at which EC2 Auto Scaling sends aggregated data to AWS CloudWatch. The only valid value is \"1Minute\".", + }, + + "metrics": { + Type: schema.TypeList, + Optional: true, + Description: "The metrics to enable. For a list of valid metrics, see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html. If you specify granularity and don't specify any metrics, all metrics are enabled.", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigInstancePlacementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tenancy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "The tenancy for the instance. 
Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", +{{- end }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + + "secret_version": { + Type: schema.TypeString, + Required: true, + Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Optional. Type of the EBS volume. 
When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigSpotConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_types": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "List of AWS EC2 instance types for creating a spot node pool's nodes. The specified instance types must have the same number of CPUs and memory. You can use the Amazon EC2 Instance Selector tool (https://github.com/aws/amazon-ec2-instance-selector) to choose instance types with matching CPU and memory", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- end }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ec2_key_pair": { + Type: schema.TypeString, + Required: true, + Description: "The name of the EC2 key pair used to login into cluster machines.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigTaintsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "The taint effect. 
Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE", + }, + + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Key for the taint.", + }, + + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Value for the taint.", + }, + }, + } +} + +func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_pods_per_node": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The maximum number of pods to schedule on a single node.", + }, + }, + } +} + +func ContainerAwsNodePoolKubeletConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_cfs_quota": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Whether or not to enable CPU CFS quota. Defaults to true.", + }, + + "cpu_cfs_quota_period": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", + }, + + "cpu_manager_policy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".", + }, + + "pod_pids_limit": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.", + }, + }, + } +} + +func ContainerAwsNodePoolManagementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_repair": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Optional. 
Whether or not the nodes will be automatically repaired.", + }, + }, + } +} + +func ContainerAwsNodePoolUpdateSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "surge_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Settings for surge update.", + MaxItems: 1, + Elem: ContainerAwsNodePoolUpdateSettingsSurgeSettingsSchema(), + }, + }, + } +} + +func ContainerAwsNodePoolUpdateSettingsSurgeSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_surge": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The maximum number of nodes that can be created beyond the current size of the node pool during the update process.", + }, + + "max_unavailable": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The maximum number of nodes that can be simultaneously unavailable during the update process. 
A node is considered unavailable if its status is not Ready.", + }, + }, + } +} + +func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), 
obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAwsNodePoolRead(d, meta) +} + +func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + 
d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetNodePool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAwsNodePool %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("autoscaling", flattenContainerAwsNodePoolAutoscaling(res.Autoscaling)); err != nil { + return fmt.Errorf("error setting autoscaling in state: %s", err) + } + if err = d.Set("cluster", res.Cluster); err != nil { + return fmt.Errorf("error setting cluster in state: %s", err) + } + if err = d.Set("config", flattenContainerAwsNodePoolConfig(res.Config)); err != nil { + return fmt.Errorf("error setting config in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("max_pods_constraint", flattenContainerAwsNodePoolMaxPodsConstraint(res.MaxPodsConstraint)); err != nil { + return fmt.Errorf("error setting max_pods_constraint in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("subnet_id", res.SubnetId); err != nil { + return fmt.Errorf("error setting subnet_id in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("kubelet_config", flattenContainerAwsNodePoolKubeletConfig(res.KubeletConfig)); err != nil { + return fmt.Errorf("error setting kubelet_config in state: %s", err) + } + if err = d.Set("management", flattenContainerAwsNodePoolManagement(res.Management)); err != nil { + return fmt.Errorf("error setting management in state: %s", err) + } + if err = 
d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("update_settings", flattenContainerAwsNodePoolUpdateSettings(res.UpdateSettings)); err != nil { + return fmt.Errorf("error setting update_settings in state: %s", err) + } + if err = d.Set("annotations", flattenContainerAwsNodePoolAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: 
tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + directive := dcl.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAwsNodePoolRead(d, meta) +} + +func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + + log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, 
client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteNodePool(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished deleting NodePool %q", d.Id()) + return nil +} + +func resourceContainerAwsNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/awsClusters/(?P[^/]+)/awsNodePools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAwsNodePoolAutoscaling(o interface{}) *NodePoolAutoscaling { + if o == nil { + return EmptyNodePoolAutoscaling + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolAutoscaling + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolAutoscaling{ + MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))), + MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))), + } +} + +func flattenContainerAwsNodePoolAutoscaling(obj *NodePoolAutoscaling) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_node_count": obj.MaxNodeCount, + "min_node_count": obj.MinNodeCount, + } + + return []interface{}{transformed} + +} + +func 
expandContainerAwsNodePoolConfig(o interface{}) *NodePoolConfig { + if o == nil { + return EmptyNodePoolConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfig + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfig{ + ConfigEncryption: expandContainerAwsNodePoolConfigConfigEncryption(obj["config_encryption"]), + IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), + AutoscalingMetricsCollection: expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj["autoscaling_metrics_collection"]), +{{- if ne $.TargetVersionName "ga" }} + ImageType: dcl.StringOrNil(obj["image_type"].(string)), + InstancePlacement: expandContainerAwsNodePoolConfigInstancePlacement(obj["instance_placement"]), +{{- end }} + InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), + Labels: tpgresource.CheckStringMap(obj["labels"]), + ProxyConfig: expandContainerAwsNodePoolConfigProxyConfig(obj["proxy_config"]), + RootVolume: expandContainerAwsNodePoolConfigRootVolume(obj["root_volume"]), + SecurityGroupIds: dcl.ExpandStringArray(obj["security_group_ids"]), +{{- if ne $.TargetVersionName "ga" }} + SpotConfig: expandContainerAwsNodePoolConfigSpotConfig(obj["spot_config"]), +{{- end }} + SshConfig: expandContainerAwsNodePoolConfigSshConfig(obj["ssh_config"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + Taints: expandContainerAwsNodePoolConfigTaintsArray(obj["taints"]), + } +} + +func flattenContainerAwsNodePoolConfig(obj *NodePoolConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "config_encryption": flattenContainerAwsNodePoolConfigConfigEncryption(obj.ConfigEncryption), + "iam_instance_profile": obj.IamInstanceProfile, + "autoscaling_metrics_collection": flattenContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj.AutoscalingMetricsCollection), +{{- if ne $.TargetVersionName "ga" }} + "image_type": obj.ImageType, + 
"instance_placement": flattenContainerAwsNodePoolConfigInstancePlacement(obj.InstancePlacement), +{{- end }} + "instance_type": obj.InstanceType, + "labels": obj.Labels, + "proxy_config": flattenContainerAwsNodePoolConfigProxyConfig(obj.ProxyConfig), + "root_volume": flattenContainerAwsNodePoolConfigRootVolume(obj.RootVolume), + "security_group_ids": obj.SecurityGroupIds, +{{- if ne $.TargetVersionName "ga" }} + "spot_config": flattenContainerAwsNodePoolConfigSpotConfig(obj.SpotConfig), +{{- end }} + "ssh_config": flattenContainerAwsNodePoolConfigSshConfig(obj.SshConfig), + "tags": obj.Tags, + "taints": flattenContainerAwsNodePoolConfigTaintsArray(obj.Taints), + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigConfigEncryption(o interface{}) *NodePoolConfigConfigEncryption { + if o == nil { + return EmptyNodePoolConfigConfigEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfigConfigEncryption + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigConfigEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigConfigEncryption(obj *NodePoolConfigConfigEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(o interface{}) *NodePoolConfigAutoscalingMetricsCollection { + if o == nil { + return EmptyNodePoolConfigAutoscalingMetricsCollection + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfigAutoscalingMetricsCollection + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigAutoscalingMetricsCollection{ + Granularity: dcl.String(obj["granularity"].(string)), + Metrics: dcl.ExpandStringArray(obj["metrics"]), + } +} + +func 
flattenContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj *NodePoolConfigAutoscalingMetricsCollection) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "granularity": obj.Granularity, + "metrics": obj.Metrics, +{{- if ne $.TargetVersionName "ga" }} + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigInstancePlacement(o interface{}) *NodePoolConfigInstancePlacement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigInstancePlacement{ + Tenancy: NodePoolConfigInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigInstancePlacement(obj *NodePoolConfigInstancePlacement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "tenancy": obj.Tenancy, +{{- end }} + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigProxyConfig(o interface{}) *NodePoolConfigProxyConfig { + if o == nil { + return EmptyNodePoolConfigProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfigProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigProxyConfig{ + SecretArn: dcl.String(obj["secret_arn"].(string)), + SecretVersion: dcl.String(obj["secret_version"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigProxyConfig(obj *NodePoolConfigProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "secret_arn": obj.SecretArn, + "secret_version": obj.SecretVersion, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigRootVolume(o interface{}) *NodePoolConfigRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + 
if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigRootVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), + VolumeType: NodePoolConfigRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigRootVolume(obj *NodePoolConfigRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "throughput": obj.Throughput, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAwsNodePoolConfigSpotConfig(o interface{}) *NodePoolConfigSpotConfig { + if o == nil { + return EmptyNodePoolConfigSpotConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfigSpotConfig + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigSpotConfig{ + InstanceTypes: dcl.ExpandStringArray(obj["instance_types"]), + } +} + +func flattenContainerAwsNodePoolConfigSpotConfig(obj *NodePoolConfigSpotConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instance_types": obj.InstanceTypes, + } + + return []interface{}{transformed} + +} + +{{- end }} +func expandContainerAwsNodePoolConfigSshConfig(o interface{}) *NodePoolConfigSshConfig { + if o == nil { + return EmptyNodePoolConfigSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfigSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigSshConfig{ + Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), + } +} + +func 
flattenContainerAwsNodePoolConfigSshConfig(obj *NodePoolConfigSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ec2_key_pair": obj.Ec2KeyPair, + } + + return []interface{}{transformed} + +} +func expandContainerAwsNodePoolConfigTaintsArray(o interface{}) []NodePoolConfigTaints { + if o == nil { + return make([]NodePoolConfigTaints, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]NodePoolConfigTaints, 0) + } + + items := make([]NodePoolConfigTaints, 0, len(objs)) + for _, item := range objs { + i := expandContainerAwsNodePoolConfigTaints(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsNodePoolConfigTaints(o interface{}) *NodePoolConfigTaints { + if o == nil { + return EmptyNodePoolConfigTaints + } + + obj := o.(map[string]interface{}) + return &NodePoolConfigTaints{ + Effect: NodePoolConfigTaintsEffectEnumRef(obj["effect"].(string)), + Key: dcl.String(obj["key"].(string)), + Value: dcl.String(obj["value"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigTaintsArray(objs []NodePoolConfigTaints) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsNodePoolConfigTaints(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsNodePoolConfigTaints(obj *NodePoolConfigTaints) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "effect": obj.Effect, + "key": obj.Key, + "value": obj.Value, + } + + return transformed + +} + +func expandContainerAwsNodePoolMaxPodsConstraint(o interface{}) *NodePoolMaxPodsConstraint { + if o == nil { + return EmptyNodePoolMaxPodsConstraint + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolMaxPodsConstraint + } + obj := objArr[0].(map[string]interface{}) + return 
&NodePoolMaxPodsConstraint{ + MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), + } +} + +func flattenContainerAwsNodePoolMaxPodsConstraint(obj *NodePoolMaxPodsConstraint) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_pods_per_node": obj.MaxPodsPerNode, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolKubeletConfig(o interface{}) *NodePoolKubeletConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolKubeletConfig{ + CpuCfsQuota: dcl.Bool(obj["cpu_cfs_quota"].(bool)), + CpuCfsQuotaPeriod: dcl.String(obj["cpu_cfs_quota_period"].(string)), + CpuManagerPolicy: NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)), + PodPidsLimit: dcl.Int64(int64(obj["pod_pids_limit"].(int))), + } +} + +func flattenContainerAwsNodePoolKubeletConfig(obj *NodePoolKubeletConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu_cfs_quota": obj.CpuCfsQuota, + "cpu_cfs_quota_period": obj.CpuCfsQuotaPeriod, + "cpu_manager_policy": obj.CpuManagerPolicy, + "pod_pids_limit": obj.PodPidsLimit, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolManagement(o interface{}) *NodePoolManagement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolManagement{ + AutoRepair: dcl.Bool(obj["auto_repair"].(bool)), + } +} + +func flattenContainerAwsNodePoolManagement(obj *NodePoolManagement) interface{} { + if obj == nil { + return nil + } + transformed := make(map[string]interface{}) + + if obj.AutoRepair == nil || obj.Empty() { + transformed["auto_repair"] = false + } else { + transformed["auto_repair"] = 
obj.AutoRepair + } + + return []interface{}{transformed} +} + +func expandContainerAwsNodePoolUpdateSettings(o interface{}) *NodePoolUpdateSettings { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolUpdateSettings{ + SurgeSettings: expandContainerAwsNodePoolUpdateSettingsSurgeSettings(obj["surge_settings"]), + } +} + +func flattenContainerAwsNodePoolUpdateSettings(obj *NodePoolUpdateSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "surge_settings": flattenContainerAwsNodePoolUpdateSettingsSurgeSettings(obj.SurgeSettings), + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolUpdateSettingsSurgeSettings(o interface{}) *NodePoolUpdateSettingsSurgeSettings { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolUpdateSettingsSurgeSettings{ + MaxSurge: dcl.Int64OrNil(int64(obj["max_surge"].(int))), + MaxUnavailable: dcl.Int64OrNil(int64(obj["max_unavailable"].(int))), + } +} + +func flattenContainerAwsNodePoolUpdateSettingsSurgeSettings(obj *NodePoolUpdateSettingsSurgeSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_surge": obj.MaxSurge, + "max_unavailable": obj.MaxUnavailable, + } + + return []interface{}{transformed} + +} + +func flattenContainerAwsNodePoolAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git 
a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl new file mode 100644 index 000000000000..29ff7e1aa9ec --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl @@ -0,0 +1,1582 @@ +package containeraws_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccContainerAwsNodePool_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsNodePool_BasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", 
"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccContainerAwsNodePool_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsNodePool_BetaBasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": 
"sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BetaBasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BetaBasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAwsNodePool_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + 
security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + management { + auto_repair = true + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + 
kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = 
"taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-two = "value-two" + } + + management { + auto_repair = false + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + 
+func testAccContainerAwsNodePool_BasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws 
cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } +{{- if ne $.TargetVersionName "ga" }} + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-two = "value-two" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = 
"arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + 
label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "DEDICATED" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + management { + auto_repair = true + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + annotations = { + label-one = "value-one" + } + + update_settings { + surge_settings { + max_surge = 1 + max_unavailable = 0 + } + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = 
"GP3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "DEDICATED" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + management { + auto_repair = false + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + annotations = { + label-two = "value-two" + } + + update_settings { + surge_settings { + max_surge = 1 + max_unavailable = 0 + } + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + 
iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = 
"arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "dedicated" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = 
"GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "dedicated" + } + + image_type = 
"ubuntu" +{{- end }} + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-two = "value-two" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckContainerAwsNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_aws_node_pool" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containeraws.NodePool{ + Cluster: dcl.String(rs.Primary.Attributes["cluster"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + SubnetId: dcl.String(rs.Primary.Attributes["subnet_id"]), + Version: dcl.String(rs.Primary.Attributes["version"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containeraws.NodePoolStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := containeraws.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetNodePool(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_aws_node_pool still exists %v", obj) + } + } + return nil + } +} diff --git 
a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl index 68a119049c43..17c7c8cf1cbf 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_container_aws_node_pool' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AwsNodePool' diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl b/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl new file mode 100644 index 000000000000..0ffd0569a6c8 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl @@ -0,0 +1,366 @@ +package containerazure + +import ( + "context" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type AzureClient struct { + Name *string `json:"name"` + TenantId *string `json:"tenantId"` + ApplicationId *string `json:"applicationId"` + Certificate *string `json:"certificate"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *AzureClient) String() string { + return dcl.SprintResource(r) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *AzureClient) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_azure", + Type: "Client", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containerazure", +{{- end }} + } +} + +func (r *AzureClient) ID() (string, error) { + if err := extractClientFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "tenant_id": dcl.ValueOrEmptyString(nr.TenantId), + "application_id": dcl.ValueOrEmptyString(nr.ApplicationId), + "certificate": dcl.ValueOrEmptyString(nr.Certificate), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ClientMaxPage = -1 + +type ClientList struct { + Items []*AzureClient + + nextToken string + + pageSize int32 + + resource *AzureClient +} + +func (l *ClientList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ClientList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listClient(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListClient(ctx context.Context, project, location string) (*ClientList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListClientWithMaxResults(ctx, project, location, ClientMaxPage) + +} + +func (c *Client) ListClientWithMaxResults(ctx 
context.Context, project, location string, pageSize int32) (*ClientList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &AzureClient{ + Project: &project, + Location: &location, + } + items, token, err := c.listClient(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ClientList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetClient(ctx context.Context, r *AzureClient) (*AzureClient, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractClientFields(r) + + b, err := c.getClientRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalClient(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeClientNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractClientFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteClient(ctx context.Context, r *AzureClient) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Client resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Client...") + deleteOp := deleteClientOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllClient deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllClient(ctx context.Context, project, location string, filter func(*AzureClient) bool) error { + listObj, err := c.ListClient(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllClient(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllClient(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyClient(ctx context.Context, rawDesired *AzureClient, opts ...dcl.ApplyOption) (*AzureClient, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *AzureClient + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyClientHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyClientHelper(c *Client, ctx context.Context, rawDesired *AzureClient, opts ...dcl.ApplyOption) (*AzureClient, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyClient...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractClientFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.clientDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToClientDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []clientApiOperation + if create { + ops = append(ops, &createClientOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyClientDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyClientDiff(c *Client, ctx context.Context, desired *AzureClient, rawDesired *AzureClient, ops []clientApiOperation, opts ...dcl.ApplyOption) (*AzureClient, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetClient(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createClientOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapClient(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeClientNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeClientNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeClientDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractClientFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractClientFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffClient(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go b/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go new file mode 100644 index 000000000000..091124d42324 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go @@ -0,0 +1,715 @@ +package containerazure + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *AzureClient) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "tenantId"); err != nil { + return err + } + if err := dcl.Required(r, "applicationId"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + return nil +} +func (r *AzureClient) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return 
dcl.Nprintf("https://{{location}}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *AzureClient) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *AzureClient) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients", nr.basePath(), userBasePath, params), nil + +} + +func (r *AzureClient) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients?azureClientId={{name}}", nr.basePath(), userBasePath, params), nil + +} + +func (r *AzureClient) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients/{{name}}", nr.basePath(), userBasePath, params), nil +} + +// clientApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type clientApiOperation interface { + do(context.Context, *AzureClient, *Client) error +} + +func (c *Client) listClientRaw(ctx context.Context, r *AzureClient, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ClientMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listClientOperation struct { + AzureClients []map[string]interface{} `json:"azureClients"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listClient(ctx context.Context, r *AzureClient, pageToken string, pageSize int32) ([]*AzureClient, string, error) { + b, err := c.listClientRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listClientOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*AzureClient + for _, v := range m.AzureClients { + res, err := unmarshalMapClient(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllClient(ctx context.Context, f func(*AzureClient) bool, resources []*AzureClient) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteClient(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteClientOperation struct{} + +func (op *deleteClientOperation) do(ctx context.Context, r *AzureClient, c *Client) error { + r, err := c.GetClient(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Client not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetClient checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetClient(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createClientOperation struct { + response map[string]interface{} +} + +func (op *createClientOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createClientOperation) do(ctx context.Context, r *AzureClient, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetClient(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getClientRaw(ctx context.Context, r *AzureClient) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) clientDiffsForRawDesired(ctx context.Context, rawDesired *AzureClient, opts ...dcl.ApplyOption) 
(initial, desired *AzureClient, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *AzureClient + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*AzureClient); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected AzureClient, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetClient(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Client resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Client resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Client resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeClientDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Client: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Client: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractClientFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeClientInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Client: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeClientDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Client: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffClient(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeClientInitialState(rawInitial, rawDesired *AzureClient) (*AzureClient, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeClientDesiredState(rawDesired, rawInitial *AzureClient, opts ...dcl.ApplyOption) (*AzureClient, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. 
+ + return rawDesired, nil + } + canonicalDesired := &AzureClient{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.TenantId, rawInitial.TenantId) { + canonicalDesired.TenantId = rawInitial.TenantId + } else { + canonicalDesired.TenantId = rawDesired.TenantId + } + if dcl.StringCanonicalize(rawDesired.ApplicationId, rawInitial.ApplicationId) { + canonicalDesired.ApplicationId = rawInitial.ApplicationId + } else { + canonicalDesired.ApplicationId = rawDesired.ApplicationId + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeClientNewState(c *Client, rawNew, rawDesired *AzureClient) (*AzureClient, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.TenantId) && dcl.IsEmptyValueIndirect(rawDesired.TenantId) { + rawNew.TenantId = rawDesired.TenantId + } else { + if dcl.StringCanonicalize(rawDesired.TenantId, rawNew.TenantId) { + rawNew.TenantId = rawDesired.TenantId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ApplicationId) && dcl.IsEmptyValueIndirect(rawDesired.ApplicationId) { + rawNew.ApplicationId = rawDesired.ApplicationId + } else { + if dcl.StringCanonicalize(rawDesired.ApplicationId, rawNew.ApplicationId) { + rawNew.ApplicationId = rawDesired.ApplicationId + } + } + + if 
dcl.IsEmptyValueIndirect(rawNew.Certificate) && dcl.IsEmptyValueIndirect(rawDesired.Certificate) { + rawNew.Certificate = rawDesired.Certificate + } else { + if dcl.StringCanonicalize(rawDesired.Certificate, rawNew.Certificate) { + rawNew.Certificate = rawDesired.Certificate + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffClient(c *Client, desired, actual *AzureClient, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. 
+ if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.TenantId, actual.TenantId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TenantId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ApplicationId, actual.ApplicationId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ApplicationId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Certificate, actual.Certificate, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PemCertificate")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *AzureClient) urlNormalized() *AzureClient { + normalized := dcl.Copy(*r).(AzureClient) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.TenantId = dcl.SelfLinkToName(r.TenantId) + normalized.ApplicationId = dcl.SelfLinkToName(r.ApplicationId) + normalized.Certificate = dcl.SelfLinkToName(r.Certificate) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *AzureClient) updateURL(userBasePath, updateName string) (string, error) { + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Client resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *AzureClient) marshal(c *Client) ([]byte, error) { + m, err := expandClient(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Client: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalClient decodes JSON responses into the Client resource schema. 
+func unmarshalClient(b []byte, c *Client, res *AzureClient) (*AzureClient, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapClient(m, c, res) +} + +func unmarshalMapClient(m map[string]interface{}, c *Client, res *AzureClient) (*AzureClient, error) { + + flattened := flattenClient(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandClient expands Client into a JSON request object. +func expandClient(c *Client, f *AzureClient) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/azureClients/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.TenantId; dcl.ValueShouldBeSent(v) { + m["tenantId"] = v + } + if v := f.ApplicationId; dcl.ValueShouldBeSent(v) { + m["applicationId"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenClient flattens Client from a JSON request object into the +// Client type. 
+func flattenClient(c *Client, i interface{}, res *AzureClient) *AzureClient { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &AzureClient{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.TenantId = dcl.FlattenString(m["tenantId"]) + resultRes.ApplicationId = dcl.FlattenString(m["applicationId"]) + resultRes.Certificate = dcl.FlattenString(m["pemCertificate"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *AzureClient) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalClient(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name 
== nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type clientDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp clientApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToClientDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clientDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []clientDiff + // For each operation name, create a clientDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := clientDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToClientApiOperation(opName, fieldDiffs, opts...) 
+ if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToClientApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clientApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractClientFields(r *AzureClient) error { + return nil +} + +func postReadExtractClientFields(r *AzureClient) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/client.go b/mmv1/third_party/terraform/services/containerazure/client.go new file mode 100644 index 000000000000..2e8b8e7a19f8 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/client.go @@ -0,0 +1,18 @@ +package containerazure + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl b/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl new file mode 100644 index 000000000000..db6508893347 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl @@ -0,0 +1,1342 @@ +package containerazure + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Cluster struct { + Name *string `json:"name"` + Description *string `json:"description"` + AzureRegion *string `json:"azureRegion"` + ResourceGroupId *string `json:"resourceGroupId"` + Client *string `json:"client"` + AzureServicesAuthentication *ClusterAzureServicesAuthentication `json:"azureServicesAuthentication"` + Networking *ClusterNetworking `json:"networking"` + ControlPlane *ClusterControlPlane `json:"controlPlane"` + Authorization *ClusterAuthorization `json:"authorization"` + State *ClusterStateEnum `json:"state"` + Endpoint *string `json:"endpoint"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + WorkloadIdentityConfig *ClusterWorkloadIdentityConfig `json:"workloadIdentityConfig"` + Project *string `json:"project"` + Location *string `json:"location"` + Fleet *ClusterFleet `json:"fleet"` +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig *ClusterLoggingConfig `json:"loggingConfig"` + MonitoringConfig *ClusterMonitoringConfig `json:"monitoringConfig"` +{{- end }} +} + +func (r *Cluster) String() string { + return dcl.SprintResource(r) +} + +// The enum ClusterStateEnum. 
+type ClusterStateEnum string + +// ClusterStateEnumRef returns a *ClusterStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterStateEnumRef(s string) *ClusterStateEnum { + v := ClusterStateEnum(s) + return &v +} + +func (v ClusterStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStateEnum", +{{- if ne $.TargetVersionName "ga" }} + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterLoggingConfigComponentConfigEnableComponentsEnum. +type ClusterLoggingConfigComponentConfigEnableComponentsEnum string + +// ClusterLoggingConfigComponentConfigEnableComponentsEnumRef returns a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s string) *ClusterLoggingConfigComponentConfigEnableComponentsEnum { + v := ClusterLoggingConfigComponentConfigEnableComponentsEnum(s) + return &v +} + +func (v ClusterLoggingConfigComponentConfigEnableComponentsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"COMPONENT_UNSPECIFIED", "SYSTEM_COMPONENTS", "WORKLOADS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterLoggingConfigComponentConfigEnableComponentsEnum", +{{- end }} + Value: string(v), + Valid: []string{}, + } +} + +type ClusterAzureServicesAuthentication struct { + empty bool `json:"-"` + TenantId *string `json:"tenantId"` + ApplicationId *string `json:"applicationId"` +} + +type jsonClusterAzureServicesAuthentication ClusterAzureServicesAuthentication + +func (r *ClusterAzureServicesAuthentication) UnmarshalJSON(data []byte) error { + var res jsonClusterAzureServicesAuthentication + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAzureServicesAuthentication + } else { + + r.TenantId = res.TenantId + + r.ApplicationId = res.ApplicationId + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAzureServicesAuthentication is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAzureServicesAuthentication *ClusterAzureServicesAuthentication = &ClusterAzureServicesAuthentication{empty: true} + +func (r *ClusterAzureServicesAuthentication) Empty() bool { + return r.empty +} + +func (r *ClusterAzureServicesAuthentication) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAzureServicesAuthentication) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterNetworking struct { + empty bool `json:"-"` + VirtualNetworkId *string `json:"virtualNetworkId"` + PodAddressCidrBlocks []string `json:"podAddressCidrBlocks"` + ServiceAddressCidrBlocks []string `json:"serviceAddressCidrBlocks"` +} + +type jsonClusterNetworking ClusterNetworking + +func (r *ClusterNetworking) UnmarshalJSON(data []byte) error { + var res jsonClusterNetworking + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterNetworking + } else { + + r.VirtualNetworkId = res.VirtualNetworkId + + r.PodAddressCidrBlocks = res.PodAddressCidrBlocks + + r.ServiceAddressCidrBlocks = res.ServiceAddressCidrBlocks + + } + return nil +} + +// This object is used to assert a desired state where this ClusterNetworking is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterNetworking *ClusterNetworking = &ClusterNetworking{empty: true} + +func (r *ClusterNetworking) Empty() bool { + return r.empty +} + +func (r *ClusterNetworking) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterNetworking) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlane struct { + empty bool `json:"-"` + Version *string `json:"version"` + SubnetId *string `json:"subnetId"` + VmSize *string `json:"vmSize"` + SshConfig *ClusterControlPlaneSshConfig `json:"sshConfig"` + RootVolume *ClusterControlPlaneRootVolume `json:"rootVolume"` + MainVolume *ClusterControlPlaneMainVolume `json:"mainVolume"` + DatabaseEncryption *ClusterControlPlaneDatabaseEncryption `json:"databaseEncryption"` + Tags map[string]string `json:"tags"` + ProxyConfig *ClusterControlPlaneProxyConfig `json:"proxyConfig"` + ReplicaPlacements []ClusterControlPlaneReplicaPlacements `json:"replicaPlacements"` +} + +type jsonClusterControlPlane ClusterControlPlane + +func (r *ClusterControlPlane) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlane + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlane + } else { + + r.Version = res.Version + + r.SubnetId = res.SubnetId + + r.VmSize = res.VmSize + + r.SshConfig = res.SshConfig + + r.RootVolume = res.RootVolume + + r.MainVolume = res.MainVolume + + r.DatabaseEncryption = res.DatabaseEncryption + + r.Tags = res.Tags + + r.ProxyConfig = res.ProxyConfig + + r.ReplicaPlacements = res.ReplicaPlacements + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlane is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterControlPlane *ClusterControlPlane = &ClusterControlPlane{empty: true} + +func (r *ClusterControlPlane) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlane) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlane) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneSshConfig struct { + empty bool `json:"-"` + AuthorizedKey *string `json:"authorizedKey"` +} + +type jsonClusterControlPlaneSshConfig ClusterControlPlaneSshConfig + +func (r *ClusterControlPlaneSshConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneSshConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneSshConfig + } else { + + r.AuthorizedKey = res.AuthorizedKey + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneSshConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneSshConfig *ClusterControlPlaneSshConfig = &ClusterControlPlaneSshConfig{empty: true} + +func (r *ClusterControlPlaneSshConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` +} + +type jsonClusterControlPlaneRootVolume ClusterControlPlaneRootVolume + +func (r *ClusterControlPlaneRootVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneRootVolume + } else { + + r.SizeGib = res.SizeGib + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneRootVolume *ClusterControlPlaneRootVolume = &ClusterControlPlaneRootVolume{empty: true} + +func (r *ClusterControlPlaneRootVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneMainVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` +} + +type jsonClusterControlPlaneMainVolume ClusterControlPlaneMainVolume + +func (r *ClusterControlPlaneMainVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneMainVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneMainVolume + } else { + + r.SizeGib = res.SizeGib + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneMainVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneMainVolume *ClusterControlPlaneMainVolume = &ClusterControlPlaneMainVolume{empty: true} + +func (r *ClusterControlPlaneMainVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneMainVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneMainVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneDatabaseEncryption struct { + empty bool `json:"-"` + KeyId *string `json:"keyId"` +} + +type jsonClusterControlPlaneDatabaseEncryption ClusterControlPlaneDatabaseEncryption + +func (r *ClusterControlPlaneDatabaseEncryption) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneDatabaseEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneDatabaseEncryption + } else { + + r.KeyId = res.KeyId + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneDatabaseEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneDatabaseEncryption *ClusterControlPlaneDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{empty: true} + +func (r *ClusterControlPlaneDatabaseEncryption) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneDatabaseEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneDatabaseEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneProxyConfig struct { + empty bool `json:"-"` + ResourceGroupId *string `json:"resourceGroupId"` + SecretId *string `json:"secretId"` +} + +type jsonClusterControlPlaneProxyConfig ClusterControlPlaneProxyConfig + +func (r *ClusterControlPlaneProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneProxyConfig + } else { + + r.ResourceGroupId = res.ResourceGroupId + + r.SecretId = res.SecretId + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneProxyConfig *ClusterControlPlaneProxyConfig = &ClusterControlPlaneProxyConfig{empty: true} + +func (r *ClusterControlPlaneProxyConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneProxyConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneReplicaPlacements struct { + empty bool `json:"-"` + SubnetId *string `json:"subnetId"` + AzureAvailabilityZone *string `json:"azureAvailabilityZone"` +} + +type jsonClusterControlPlaneReplicaPlacements ClusterControlPlaneReplicaPlacements + +func (r *ClusterControlPlaneReplicaPlacements) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneReplicaPlacements + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneReplicaPlacements + } else { + + r.SubnetId = res.SubnetId + + r.AzureAvailabilityZone = res.AzureAvailabilityZone + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneReplicaPlacements is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneReplicaPlacements *ClusterControlPlaneReplicaPlacements = &ClusterControlPlaneReplicaPlacements{empty: true} + +func (r *ClusterControlPlaneReplicaPlacements) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneReplicaPlacements) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneReplicaPlacements) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorization struct { + empty bool `json:"-"` + AdminUsers []ClusterAuthorizationAdminUsers `json:"adminUsers"` + AdminGroups []ClusterAuthorizationAdminGroups `json:"adminGroups"` +} + +type jsonClusterAuthorization ClusterAuthorization + +func (r *ClusterAuthorization) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorization + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorization + } else { + + r.AdminUsers = res.AdminUsers + + r.AdminGroups = res.AdminGroups + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorization is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorization *ClusterAuthorization = &ClusterAuthorization{empty: true} + +func (r *ClusterAuthorization) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorization) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorization) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminUsers struct { + empty bool `json:"-"` + Username *string `json:"username"` +} + +type jsonClusterAuthorizationAdminUsers ClusterAuthorizationAdminUsers + +func (r *ClusterAuthorizationAdminUsers) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminUsers + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminUsers + } else { + + r.Username = res.Username + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminUsers is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminUsers *ClusterAuthorizationAdminUsers = &ClusterAuthorizationAdminUsers{empty: true} + +func (r *ClusterAuthorizationAdminUsers) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminUsers) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminUsers) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminGroups struct { + empty bool `json:"-"` + Group *string `json:"group"` +} + +type jsonClusterAuthorizationAdminGroups ClusterAuthorizationAdminGroups + +func (r *ClusterAuthorizationAdminGroups) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminGroups + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminGroups + } else { + + r.Group = res.Group + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminGroups is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminGroups *ClusterAuthorizationAdminGroups = &ClusterAuthorizationAdminGroups{empty: true} + +func (r *ClusterAuthorizationAdminGroups) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminGroups) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminGroups) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterWorkloadIdentityConfig struct { + empty bool `json:"-"` + IssuerUri *string `json:"issuerUri"` + WorkloadPool *string `json:"workloadPool"` + IdentityProvider *string `json:"identityProvider"` +} + +type jsonClusterWorkloadIdentityConfig ClusterWorkloadIdentityConfig + +func (r *ClusterWorkloadIdentityConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterWorkloadIdentityConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterWorkloadIdentityConfig + } else { + + r.IssuerUri = res.IssuerUri + + r.WorkloadPool = res.WorkloadPool + + r.IdentityProvider = res.IdentityProvider + + } + return nil +} + +// This object is used to assert a desired state where this ClusterWorkloadIdentityConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterWorkloadIdentityConfig *ClusterWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{empty: true} + +func (r *ClusterWorkloadIdentityConfig) Empty() bool { + return r.empty +} + +func (r *ClusterWorkloadIdentityConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterWorkloadIdentityConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterFleet struct { + empty bool `json:"-"` + Project *string `json:"project"` + Membership *string `json:"membership"` +} + +type jsonClusterFleet ClusterFleet + +func (r *ClusterFleet) UnmarshalJSON(data []byte) error { + var res jsonClusterFleet + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterFleet + } else { + + r.Project = res.Project + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this ClusterFleet is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterFleet *ClusterFleet = &ClusterFleet{empty: true} + +func (r *ClusterFleet) Empty() bool { + return r.empty +} + +func (r *ClusterFleet) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterFleet) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- if ne $.TargetVersionName "ga" }} +type ClusterLoggingConfig struct { + empty bool `json:"-"` + ComponentConfig *ClusterLoggingConfigComponentConfig `json:"componentConfig"` +} + +type jsonClusterLoggingConfig ClusterLoggingConfig + +func (r *ClusterLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfig + } else { + + r.ComponentConfig = res.ComponentConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfig *ClusterLoggingConfig = &ClusterLoggingConfig{empty: true} + +func (r *ClusterLoggingConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterLoggingConfigComponentConfig struct { + empty bool `json:"-"` + EnableComponents []ClusterLoggingConfigComponentConfigEnableComponentsEnum `json:"enableComponents"` +} + +type jsonClusterLoggingConfigComponentConfig ClusterLoggingConfigComponentConfig + +func (r *ClusterLoggingConfigComponentConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfigComponentConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfigComponentConfig + } else { + + r.EnableComponents = res.EnableComponents + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfigComponentConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfigComponentConfig *ClusterLoggingConfigComponentConfig = &ClusterLoggingConfigComponentConfig{empty: true} + +func (r *ClusterLoggingConfigComponentConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfigComponentConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfigComponentConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfig struct { + empty bool `json:"-"` + ManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig `json:"managedPrometheusConfig"` +} + +type jsonClusterMonitoringConfig ClusterMonitoringConfig + +func (r *ClusterMonitoringConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfig + } else { + + r.ManagedPrometheusConfig = res.ManagedPrometheusConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfig *ClusterMonitoringConfig = &ClusterMonitoringConfig{empty: true} + +func (r *ClusterMonitoringConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfigManagedPrometheusConfig struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` +} + +type jsonClusterMonitoringConfigManagedPrometheusConfig ClusterMonitoringConfigManagedPrometheusConfig + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfigManagedPrometheusConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfigManagedPrometheusConfig + } else { + + r.Enabled = res.Enabled + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfigManagedPrometheusConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfigManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{empty: true} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- end }} +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Cluster) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_azure", + Type: "Cluster", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containerazure", +{{- end }} + } +} + +func (r *Cluster) ID() (string, error) { + if err := extractClusterFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "description": dcl.ValueOrEmptyString(nr.Description), + "azure_region": dcl.ValueOrEmptyString(nr.AzureRegion), + "resource_group_id": dcl.ValueOrEmptyString(nr.ResourceGroupId), + "client": dcl.ValueOrEmptyString(nr.Client), + "azure_services_authentication": dcl.ValueOrEmptyString(nr.AzureServicesAuthentication), + "networking": dcl.ValueOrEmptyString(nr.Networking), + "control_plane": dcl.ValueOrEmptyString(nr.ControlPlane), + "authorization": dcl.ValueOrEmptyString(nr.Authorization), + "state": dcl.ValueOrEmptyString(nr.State), + "endpoint": dcl.ValueOrEmptyString(nr.Endpoint), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": 
dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "workload_identity_config": dcl.ValueOrEmptyString(nr.WorkloadIdentityConfig), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "fleet": dcl.ValueOrEmptyString(nr.Fleet), +{{- if ne $.TargetVersionName "ga" }} + "logging_config": dcl.ValueOrEmptyString(nr.LoggingConfig), + "monitoring_config": dcl.ValueOrEmptyString(nr.MonitoringConfig), +{{- end }} + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ClusterMaxPage = -1 + +type ClusterList struct { + Items []*Cluster + + nextToken string + + pageSize int32 + + resource *Cluster +} + +func (l *ClusterList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ClusterList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listCluster(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListCluster(ctx context.Context, project, location string) (*ClusterList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListClusterWithMaxResults(ctx, project, location, ClusterMaxPage) + +} + +func (c *Client) ListClusterWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*ClusterList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization 
methods. + r := &Cluster{ + Project: &project, + Location: &location, + } + items, token, err := c.listCluster(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ClusterList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractClusterFields(r) + + b, err := c.getClusterRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalCluster(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeClusterNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractClusterFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteCluster(ctx context.Context, r *Cluster) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Cluster resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Cluster...") + deleteOp := deleteClusterOperation{} + return deleteOp.do(ctx, r, c) +} + +// 
DeleteAllCluster deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllCluster(ctx context.Context, project, location string, filter func(*Cluster) bool) error { + listObj, err := c.ListCluster(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyCluster(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Cluster + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyClusterHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyClusterHelper(c *Client, ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyCluster...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractClusterFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.clusterDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToClusterDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []clusterApiOperation + if create { + ops = append(ops, &createClusterOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyClusterDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyClusterDiff(c *Client, ctx context.Context, desired *Cluster, rawDesired *Cluster, ops []clusterApiOperation, opts ...dcl.ApplyOption) (*Cluster, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetCluster(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createClusterOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapCluster(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeClusterNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeClusterNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeClusterDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractClusterFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractClusterFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffCluster(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/cluster_internal.go.tmpl b/mmv1/third_party/terraform/services/containerazure/cluster_internal.go.tmpl new file mode 100644 index 000000000000..c5f544c0da29 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/cluster_internal.go.tmpl @@ -0,0 +1,6926 @@ +package containerazure + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Cluster) validate() error { + + if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"Client", "AzureServicesAuthentication"}, r.Client, r.AzureServicesAuthentication); err != nil { + return err + } + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "azureRegion"); err != nil { + return err + } + if err := dcl.Required(r, "resourceGroupId"); err != nil { + return err + } + if err := dcl.Required(r, "networking"); err != nil { + return err + } + if err := dcl.Required(r, "controlPlane"); err != nil { + return err + } + if err := dcl.Required(r, "authorization"); err 
!= nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.Required(r, "fleet"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.AzureServicesAuthentication) { + if err := r.AzureServicesAuthentication.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Networking) { + if err := r.Networking.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ControlPlane) { + if err := r.ControlPlane.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Authorization) { + if err := r.Authorization.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkloadIdentityConfig) { + if err := r.WorkloadIdentityConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Fleet) { + if err := r.Fleet.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MonitoringConfig) { + if err := r.MonitoringConfig.validate(); err != nil { + return err + } + } +{{- end }} + return nil +} +func (r *ClusterAzureServicesAuthentication) validate() error { + if err := dcl.Required(r, "tenantId"); err != nil { + return err + } + if err := dcl.Required(r, "applicationId"); err != nil { + return err + } + return nil +} +func (r *ClusterNetworking) validate() error { + if err := dcl.Required(r, "virtualNetworkId"); err != nil { + return err + } + if err := dcl.Required(r, "podAddressCidrBlocks"); err != nil { + return err + } + if err := dcl.Required(r, "serviceAddressCidrBlocks"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlane) validate() error { + if err := dcl.Required(r, "version"); err != nil 
{ + return err + } + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "sshConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MainVolume) { + if err := r.MainVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DatabaseEncryption) { + if err := r.DatabaseEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterControlPlaneSshConfig) validate() error { + if err := dcl.Required(r, "authorizedKey"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneRootVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneMainVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneDatabaseEncryption) validate() error { + if err := dcl.Required(r, "keyId"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneProxyConfig) validate() error { + if err := dcl.Required(r, "resourceGroupId"); err != nil { + return err + } + if err := dcl.Required(r, "secretId"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneReplicaPlacements) validate() error { + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "azureAvailabilityZone"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorization) validate() error { + if err := dcl.Required(r, "adminUsers"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorizationAdminUsers) validate() error { + if err := dcl.Required(r, "username"); err != nil { + return err + } + return nil +} +func (r 
*ClusterAuthorizationAdminGroups) validate() error { + if err := dcl.Required(r, "group"); err != nil { + return err + } + return nil +} +func (r *ClusterWorkloadIdentityConfig) validate() error { + return nil +} +func (r *ClusterFleet) validate() error { + if err := dcl.Required(r, "project"); err != nil { + return err + } +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func (r *ClusterLoggingConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ComponentConfig) { + if err := r.ComponentConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterLoggingConfigComponentConfig) validate() error { + return nil +} +func (r *ClusterMonitoringConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ManagedPrometheusConfig) { + if err := r.ManagedPrometheusConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterMonitoringConfigManagedPrometheusConfig) validate() error { +{{- end }} + return nil +} +func (r *Cluster) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *Cluster) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Cluster) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" 
}}/azureClusters", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters?azureClusterId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// clusterApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type clusterApiOperation interface { + do(context.Context, *Cluster, *Client) error +} + +// newUpdateClusterUpdateAzureClusterRequest creates a request for an +// Cluster resource's UpdateAzureCluster update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateClusterUpdateAzureClusterRequest(ctx context.Context, f *Cluster, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v := f.Client; !dcl.IsEmptyValueIndirect(v) { + req["azureClient"] = v + } + if v, err := expandClusterAzureServicesAuthentication(c, f.AzureServicesAuthentication, res); err != nil { + return nil, fmt.Errorf("error expanding AzureServicesAuthentication into azureServicesAuthentication: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["azureServicesAuthentication"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["authorization"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["monitoringConfig"] = v +{{- end }} + } + b, err := c.getClusterRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to 
fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateClusterUpdateAzureClusterRequest converts the update into +// the final JSON request body. +func marshalUpdateClusterUpdateAzureClusterRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateClusterUpdateAzureClusterOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateClusterUpdateAzureClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + _, err := c.GetCluster(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAzureCluster") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateClusterUpdateAzureClusterRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateClusterUpdateAzureClusterRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return 
err + } + + return nil +} + +func (c *Client) listClusterRaw(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ClusterMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listClusterOperation struct { + AzureClusters []map[string]interface{} `json:"azureClusters"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listCluster(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]*Cluster, string, error) { + b, err := c.listClusterRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listClusterOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Cluster + for _, v := range m.AzureClusters { + res, err := unmarshalMapCluster(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllCluster(ctx context.Context, f func(*Cluster) bool, resources []*Cluster) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteCluster(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteClusterOperation struct{} + +func (op *deleteClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + r, err := c.GetCluster(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Cluster not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetCluster checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetCluster(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createClusterOperation struct { + response map[string]interface{} +} + +func (op *createClusterOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetCluster(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getClusterRaw(ctx context.Context, r *Cluster) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) clusterDiffsForRawDesired(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) 
(initial, desired *Cluster, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Cluster + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Cluster); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Cluster, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetCluster(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Cluster resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Cluster resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Cluster resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Cluster: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Cluster: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractClusterFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeClusterInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Cluster: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Cluster: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffCluster(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeClusterInitialState(rawInitial, rawDesired *Cluster) (*Cluster, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + + if !dcl.IsZeroValue(rawInitial.Client) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.AzureServicesAuthentication) { + rawInitial.Client = dcl.String("") + } + } + + if !dcl.IsZeroValue(rawInitial.AzureServicesAuthentication) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Client) { + rawInitial.AzureServicesAuthentication = EmptyClusterAzureServicesAuthentication + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeClusterDesiredState(rawDesired, rawInitial *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.AzureServicesAuthentication = canonicalizeClusterAzureServicesAuthentication(rawDesired.AzureServicesAuthentication, nil, opts...) + rawDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, nil, opts...) + rawDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, nil, opts...) + rawDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, nil, opts...) + rawDesired.WorkloadIdentityConfig = canonicalizeClusterWorkloadIdentityConfig(rawDesired.WorkloadIdentityConfig, nil, opts...) 
+ rawDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, nil, opts...) +{{- if ne $.TargetVersionName "ga" }} + rawDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, nil, opts...) + rawDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, nil, opts...) +{{- end }} + + return rawDesired, nil + } + canonicalDesired := &Cluster{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.StringCanonicalize(rawDesired.AzureRegion, rawInitial.AzureRegion) { + canonicalDesired.AzureRegion = rawInitial.AzureRegion + } else { + canonicalDesired.AzureRegion = rawDesired.AzureRegion + } + if dcl.StringCanonicalize(rawDesired.ResourceGroupId, rawInitial.ResourceGroupId) { + canonicalDesired.ResourceGroupId = rawInitial.ResourceGroupId + } else { + canonicalDesired.ResourceGroupId = rawDesired.ResourceGroupId + } + if dcl.IsZeroValue(rawDesired.Client) || (dcl.IsEmptyValueIndirect(rawDesired.Client) && dcl.IsEmptyValueIndirect(rawInitial.Client)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Client = rawInitial.Client + } else { + canonicalDesired.Client = rawDesired.Client + } + canonicalDesired.AzureServicesAuthentication = canonicalizeClusterAzureServicesAuthentication(rawDesired.AzureServicesAuthentication, rawInitial.AzureServicesAuthentication, opts...) + canonicalDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, rawInitial.Networking, opts...) + canonicalDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, rawInitial.ControlPlane, opts...) 
+ canonicalDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, rawInitial.Authorization, opts...) + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + canonicalDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, rawInitial.Fleet, opts...) +{{- if ne $.TargetVersionName "ga" }} + canonicalDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, rawInitial.LoggingConfig, opts...) + canonicalDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, rawInitial.MonitoringConfig, opts...) +{{- end }} + + if canonicalDesired.Client != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.AzureServicesAuthentication) { + canonicalDesired.Client = dcl.String("") + } + } + + if canonicalDesired.AzureServicesAuthentication != nil { + // Check if anything else is set. 
+ if dcl.AnySet(rawDesired.Client) { + canonicalDesired.AzureServicesAuthentication = EmptyClusterAzureServicesAuthentication + } + } + + return canonicalDesired, nil +} + +func canonicalizeClusterNewState(c *Client, rawNew, rawDesired *Cluster) (*Cluster, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.AzureRegion) && dcl.IsEmptyValueIndirect(rawDesired.AzureRegion) { + rawNew.AzureRegion = rawDesired.AzureRegion + } else { + if dcl.StringCanonicalize(rawDesired.AzureRegion, rawNew.AzureRegion) { + rawNew.AzureRegion = rawDesired.AzureRegion + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceGroupId) && dcl.IsEmptyValueIndirect(rawDesired.ResourceGroupId) { + rawNew.ResourceGroupId = rawDesired.ResourceGroupId + } else { + if dcl.StringCanonicalize(rawDesired.ResourceGroupId, rawNew.ResourceGroupId) { + rawNew.ResourceGroupId = rawDesired.ResourceGroupId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Client) && dcl.IsEmptyValueIndirect(rawDesired.Client) { + rawNew.Client = rawDesired.Client + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.AzureServicesAuthentication) && dcl.IsEmptyValueIndirect(rawDesired.AzureServicesAuthentication) { + rawNew.AzureServicesAuthentication = rawDesired.AzureServicesAuthentication + } else { + rawNew.AzureServicesAuthentication = canonicalizeNewClusterAzureServicesAuthentication(c, rawDesired.AzureServicesAuthentication, rawNew.AzureServicesAuthentication) + } + + if 
dcl.IsEmptyValueIndirect(rawNew.Networking) && dcl.IsEmptyValueIndirect(rawDesired.Networking) { + rawNew.Networking = rawDesired.Networking + } else { + rawNew.Networking = canonicalizeNewClusterNetworking(c, rawDesired.Networking, rawNew.Networking) + } + + if dcl.IsEmptyValueIndirect(rawNew.ControlPlane) && dcl.IsEmptyValueIndirect(rawDesired.ControlPlane) { + rawNew.ControlPlane = rawDesired.ControlPlane + } else { + rawNew.ControlPlane = canonicalizeNewClusterControlPlane(c, rawDesired.ControlPlane, rawNew.ControlPlane) + } + + if dcl.IsEmptyValueIndirect(rawNew.Authorization) && dcl.IsEmptyValueIndirect(rawDesired.Authorization) { + rawNew.Authorization = rawDesired.Authorization + } else { + rawNew.Authorization = canonicalizeNewClusterAuthorization(c, rawDesired.Authorization, rawNew.Authorization) + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Endpoint) && dcl.IsEmptyValueIndirect(rawDesired.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } else { + if dcl.StringCanonicalize(rawDesired.Endpoint, rawNew.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && 
dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.WorkloadIdentityConfig) && dcl.IsEmptyValueIndirect(rawDesired.WorkloadIdentityConfig) { + rawNew.WorkloadIdentityConfig = rawDesired.WorkloadIdentityConfig + } else { + rawNew.WorkloadIdentityConfig = canonicalizeNewClusterWorkloadIdentityConfig(c, rawDesired.WorkloadIdentityConfig, rawNew.WorkloadIdentityConfig) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Fleet) && dcl.IsEmptyValueIndirect(rawDesired.Fleet) { + rawNew.Fleet = rawDesired.Fleet + } else { + rawNew.Fleet = canonicalizeNewClusterFleet(c, rawDesired.Fleet, rawNew.Fleet) +{{- if ne $.TargetVersionName "ga" }} + } + + if dcl.IsEmptyValueIndirect(rawNew.LoggingConfig) && dcl.IsEmptyValueIndirect(rawDesired.LoggingConfig) { + rawNew.LoggingConfig = rawDesired.LoggingConfig + } else { + rawNew.LoggingConfig = canonicalizeNewClusterLoggingConfig(c, rawDesired.LoggingConfig, rawNew.LoggingConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.MonitoringConfig) && dcl.IsEmptyValueIndirect(rawDesired.MonitoringConfig) { + rawNew.MonitoringConfig = rawDesired.MonitoringConfig + } else { + rawNew.MonitoringConfig = canonicalizeNewClusterMonitoringConfig(c, rawDesired.MonitoringConfig, rawNew.MonitoringConfig) +{{- end }} + } + + return rawNew, nil +} + +func canonicalizeClusterAzureServicesAuthentication(des, initial *ClusterAzureServicesAuthentication, opts ...dcl.ApplyOption) 
*ClusterAzureServicesAuthentication { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAzureServicesAuthentication{} + + if dcl.StringCanonicalize(des.TenantId, initial.TenantId) || dcl.IsZeroValue(des.TenantId) { + cDes.TenantId = initial.TenantId + } else { + cDes.TenantId = des.TenantId + } + if dcl.StringCanonicalize(des.ApplicationId, initial.ApplicationId) || dcl.IsZeroValue(des.ApplicationId) { + cDes.ApplicationId = initial.ApplicationId + } else { + cDes.ApplicationId = des.ApplicationId + } + + return cDes +} + +func canonicalizeClusterAzureServicesAuthenticationSlice(des, initial []ClusterAzureServicesAuthentication, opts ...dcl.ApplyOption) []ClusterAzureServicesAuthentication { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAzureServicesAuthentication, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAzureServicesAuthentication(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAzureServicesAuthentication, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAzureServicesAuthentication(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAzureServicesAuthentication(c *Client, des, nw *ClusterAzureServicesAuthentication) *ClusterAzureServicesAuthentication { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAzureServicesAuthentication while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.TenantId, nw.TenantId) { + nw.TenantId = des.TenantId + } + if dcl.StringCanonicalize(des.ApplicationId, nw.ApplicationId) { + nw.ApplicationId = des.ApplicationId + } + + return nw +} + +func canonicalizeNewClusterAzureServicesAuthenticationSet(c *Client, des, nw []ClusterAzureServicesAuthentication) []ClusterAzureServicesAuthentication { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAzureServicesAuthentication + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAzureServicesAuthenticationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAzureServicesAuthentication(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAzureServicesAuthenticationSlice(c *Client, des, nw []ClusterAzureServicesAuthentication) []ClusterAzureServicesAuthentication { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAzureServicesAuthentication + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAzureServicesAuthentication(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterNetworking(des, initial *ClusterNetworking, opts ...dcl.ApplyOption) *ClusterNetworking { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterNetworking{} + + if dcl.StringCanonicalize(des.VirtualNetworkId, initial.VirtualNetworkId) || dcl.IsZeroValue(des.VirtualNetworkId) { + cDes.VirtualNetworkId = initial.VirtualNetworkId + } else { + cDes.VirtualNetworkId = des.VirtualNetworkId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, initial.PodAddressCidrBlocks) { + cDes.PodAddressCidrBlocks = initial.PodAddressCidrBlocks + } else { + cDes.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, initial.ServiceAddressCidrBlocks) { + cDes.ServiceAddressCidrBlocks = initial.ServiceAddressCidrBlocks + } else { + cDes.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + + return cDes +} + +func canonicalizeClusterNetworkingSlice(des, initial []ClusterNetworking, opts ...dcl.ApplyOption) []ClusterNetworking { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterNetworking, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterNetworking(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterNetworking, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterNetworking(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterNetworking(c *Client, des, nw *ClusterNetworking) *ClusterNetworking { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterNetworking while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.VirtualNetworkId, nw.VirtualNetworkId) { + nw.VirtualNetworkId = des.VirtualNetworkId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, nw.PodAddressCidrBlocks) { + nw.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, nw.ServiceAddressCidrBlocks) { + nw.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + + return nw +} + +func canonicalizeNewClusterNetworkingSet(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterNetworking + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterNetworkingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterNetworkingSlice(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterNetworking + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlane(des, initial *ClusterControlPlane, opts ...dcl.ApplyOption) *ClusterControlPlane { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlane{} + + if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { + cDes.Version = initial.Version + } else { + cDes.Version = des.Version + } + if dcl.StringCanonicalize(des.SubnetId, initial.SubnetId) || dcl.IsZeroValue(des.SubnetId) { + cDes.SubnetId = initial.SubnetId + } else { + cDes.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.VmSize, initial.VmSize) || dcl.IsZeroValue(des.VmSize) { + cDes.VmSize = initial.VmSize + } else { + cDes.VmSize = des.VmSize + } + cDes.SshConfig = canonicalizeClusterControlPlaneSshConfig(des.SshConfig, initial.SshConfig, opts...) + cDes.RootVolume = canonicalizeClusterControlPlaneRootVolume(des.RootVolume, initial.RootVolume, opts...) + cDes.MainVolume = canonicalizeClusterControlPlaneMainVolume(des.MainVolume, initial.MainVolume, opts...) + cDes.DatabaseEncryption = canonicalizeClusterControlPlaneDatabaseEncryption(des.DatabaseEncryption, initial.DatabaseEncryption, opts...) + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + cDes.ProxyConfig = canonicalizeClusterControlPlaneProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) + cDes.ReplicaPlacements = canonicalizeClusterControlPlaneReplicaPlacementsSlice(des.ReplicaPlacements, initial.ReplicaPlacements, opts...) 
+ + return cDes +} + +func canonicalizeClusterControlPlaneSlice(des, initial []ClusterControlPlane, opts ...dcl.ApplyOption) []ClusterControlPlane { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlane, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlane(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlane, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlane(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlane(c *Client, des, nw *ClusterControlPlane) *ClusterControlPlane { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlane while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Version, nw.Version) { + nw.Version = des.Version + } + if dcl.StringCanonicalize(des.SubnetId, nw.SubnetId) { + nw.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.VmSize, nw.VmSize) { + nw.VmSize = des.VmSize + } + nw.SshConfig = canonicalizeNewClusterControlPlaneSshConfig(c, des.SshConfig, nw.SshConfig) + nw.RootVolume = canonicalizeNewClusterControlPlaneRootVolume(c, des.RootVolume, nw.RootVolume) + nw.MainVolume = canonicalizeNewClusterControlPlaneMainVolume(c, des.MainVolume, nw.MainVolume) + nw.DatabaseEncryption = canonicalizeNewClusterControlPlaneDatabaseEncryption(c, des.DatabaseEncryption, nw.DatabaseEncryption) + nw.ProxyConfig = canonicalizeNewClusterControlPlaneProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) + nw.ReplicaPlacements = canonicalizeNewClusterControlPlaneReplicaPlacementsSlice(c, des.ReplicaPlacements, nw.ReplicaPlacements) + + return nw +} + +func 
canonicalizeNewClusterControlPlaneSet(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlane + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSlice(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlane + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneSshConfig(des, initial *ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneSshConfig{} + + if dcl.StringCanonicalize(des.AuthorizedKey, initial.AuthorizedKey) || dcl.IsZeroValue(des.AuthorizedKey) { + cDes.AuthorizedKey = initial.AuthorizedKey + } else { + cDes.AuthorizedKey = des.AuthorizedKey + } + + return cDes +} + +func canonicalizeClusterControlPlaneSshConfigSlice(des, initial []ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneSshConfig(c *Client, des, nw *ClusterControlPlaneSshConfig) *ClusterControlPlaneSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneSshConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AuthorizedKey, nw.AuthorizedKey) { + nw.AuthorizedKey = des.AuthorizedKey + } + + return nw +} + +func canonicalizeNewClusterControlPlaneSshConfigSet(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSshConfigSlice(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneRootVolume(des, initial *ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + + return cDes +} + +func canonicalizeClusterControlPlaneRootVolumeSlice(des, initial []ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneRootVolume(c *Client, des, nw *ClusterControlPlaneRootVolume) *ClusterControlPlaneRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneRootVolume while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterControlPlaneRootVolumeSet(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneRootVolumeSlice(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneMainVolume(des, initial *ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneMainVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneMainVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + + return cDes +} + +func canonicalizeClusterControlPlaneMainVolumeSlice(des, initial []ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneMainVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneMainVolume(c *Client, des, nw *ClusterControlPlaneMainVolume) *ClusterControlPlaneMainVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneMainVolume while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterControlPlaneMainVolumeSet(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneMainVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneMainVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneMainVolumeSlice(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneMainVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneDatabaseEncryption(des, initial *ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) *ClusterControlPlaneDatabaseEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneDatabaseEncryption{} + + if dcl.StringCanonicalize(des.KeyId, initial.KeyId) || dcl.IsZeroValue(des.KeyId) { + cDes.KeyId = initial.KeyId + } else { + cDes.KeyId = des.KeyId + } + + return cDes +} + +func canonicalizeClusterControlPlaneDatabaseEncryptionSlice(des, initial []ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) []ClusterControlPlaneDatabaseEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryption(c *Client, des, nw *ClusterControlPlaneDatabaseEncryption) *ClusterControlPlaneDatabaseEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneDatabaseEncryption while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KeyId, nw.KeyId) { + nw.KeyId = des.KeyId + } + + return nw +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSet(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneDatabaseEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneDatabaseEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSlice(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneDatabaseEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneProxyConfig(des, initial *ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneProxyConfig{} + + if dcl.StringCanonicalize(des.ResourceGroupId, initial.ResourceGroupId) || dcl.IsZeroValue(des.ResourceGroupId) { + cDes.ResourceGroupId = initial.ResourceGroupId + } else { + cDes.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, initial.SecretId) || dcl.IsZeroValue(des.SecretId) { + cDes.SecretId = initial.SecretId + } else { + cDes.SecretId = des.SecretId + } + + return cDes +} + +func canonicalizeClusterControlPlaneProxyConfigSlice(des, initial []ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneProxyConfig(c *Client, des, nw *ClusterControlPlaneProxyConfig) *ClusterControlPlaneProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneProxyConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ResourceGroupId, nw.ResourceGroupId) { + nw.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, nw.SecretId) { + nw.SecretId = des.SecretId + } + + return nw +} + +func canonicalizeNewClusterControlPlaneProxyConfigSet(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneProxyConfigSlice(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneReplicaPlacements(des, initial *ClusterControlPlaneReplicaPlacements, opts ...dcl.ApplyOption) *ClusterControlPlaneReplicaPlacements { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneReplicaPlacements{} + + if dcl.StringCanonicalize(des.SubnetId, initial.SubnetId) || dcl.IsZeroValue(des.SubnetId) { + cDes.SubnetId = initial.SubnetId + } else { + cDes.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.AzureAvailabilityZone, initial.AzureAvailabilityZone) || dcl.IsZeroValue(des.AzureAvailabilityZone) { + cDes.AzureAvailabilityZone = initial.AzureAvailabilityZone + } else { + cDes.AzureAvailabilityZone = des.AzureAvailabilityZone + } + + return cDes +} + +func canonicalizeClusterControlPlaneReplicaPlacementsSlice(des, initial []ClusterControlPlaneReplicaPlacements, opts ...dcl.ApplyOption) []ClusterControlPlaneReplicaPlacements { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneReplicaPlacements, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneReplicaPlacements(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneReplicaPlacements, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneReplicaPlacements(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneReplicaPlacements(c *Client, des, nw *ClusterControlPlaneReplicaPlacements) *ClusterControlPlaneReplicaPlacements { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneReplicaPlacements while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SubnetId, nw.SubnetId) { + nw.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.AzureAvailabilityZone, nw.AzureAvailabilityZone) { + nw.AzureAvailabilityZone = des.AzureAvailabilityZone + } + + return nw +} + +func canonicalizeNewClusterControlPlaneReplicaPlacementsSet(c *Client, des, nw []ClusterControlPlaneReplicaPlacements) []ClusterControlPlaneReplicaPlacements { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneReplicaPlacements + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneReplicaPlacementsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneReplicaPlacements(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneReplicaPlacementsSlice(c *Client, des, nw []ClusterControlPlaneReplicaPlacements) []ClusterControlPlaneReplicaPlacements { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneReplicaPlacements + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneReplicaPlacements(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorization(des, initial *ClusterAuthorization, opts ...dcl.ApplyOption) *ClusterAuthorization { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorization{} + + cDes.AdminUsers = canonicalizeClusterAuthorizationAdminUsersSlice(des.AdminUsers, initial.AdminUsers, opts...) + cDes.AdminGroups = canonicalizeClusterAuthorizationAdminGroupsSlice(des.AdminGroups, initial.AdminGroups, opts...) + + return cDes +} + +func canonicalizeClusterAuthorizationSlice(des, initial []ClusterAuthorization, opts ...dcl.ApplyOption) []ClusterAuthorization { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorization, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorization(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorization, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorization(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorization(c *Client, des, nw *ClusterAuthorization) *ClusterAuthorization { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorization while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.AdminUsers = canonicalizeNewClusterAuthorizationAdminUsersSlice(c, des.AdminUsers, nw.AdminUsers) + nw.AdminGroups = canonicalizeNewClusterAuthorizationAdminGroupsSlice(c, des.AdminGroups, nw.AdminGroups) + + return nw +} + +func canonicalizeNewClusterAuthorizationSet(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorization + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationSlice(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorization + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminUsers(des, initial *ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminUsers{} + + if dcl.StringCanonicalize(des.Username, initial.Username) || dcl.IsZeroValue(des.Username) { + cDes.Username = initial.Username + } else { + cDes.Username = des.Username + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminUsersSlice(des, initial []ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminUsers(c *Client, des, nw *ClusterAuthorizationAdminUsers) *ClusterAuthorizationAdminUsers { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminUsers while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Username, nw.Username) { + nw.Username = des.Username + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminUsersSet(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminUsers + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminUsersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminUsersSlice(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminUsers + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminGroups(des, initial *ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminGroups{} + + if dcl.StringCanonicalize(des.Group, initial.Group) || dcl.IsZeroValue(des.Group) { + cDes.Group = initial.Group + } else { + cDes.Group = des.Group + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminGroupsSlice(des, initial []ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminGroups(c *Client, des, nw *ClusterAuthorizationAdminGroups) *ClusterAuthorizationAdminGroups { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminGroups while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Group, nw.Group) { + nw.Group = des.Group + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSet(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminGroups + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminGroupsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSlice(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminGroups + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterWorkloadIdentityConfig(des, initial *ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) *ClusterWorkloadIdentityConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterWorkloadIdentityConfig{} + + if dcl.StringCanonicalize(des.IssuerUri, initial.IssuerUri) || dcl.IsZeroValue(des.IssuerUri) { + cDes.IssuerUri = initial.IssuerUri + } else { + cDes.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, initial.WorkloadPool) || dcl.IsZeroValue(des.WorkloadPool) { + cDes.WorkloadPool = initial.WorkloadPool + } else { + cDes.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, initial.IdentityProvider) || dcl.IsZeroValue(des.IdentityProvider) { + cDes.IdentityProvider = initial.IdentityProvider + } else { + cDes.IdentityProvider = des.IdentityProvider + } + + return cDes +} + +func canonicalizeClusterWorkloadIdentityConfigSlice(des, initial []ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) []ClusterWorkloadIdentityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterWorkloadIdentityConfig(c *Client, des, nw *ClusterWorkloadIdentityConfig) *ClusterWorkloadIdentityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterWorkloadIdentityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.IssuerUri, nw.IssuerUri) { + nw.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, nw.WorkloadPool) { + nw.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, nw.IdentityProvider) { + nw.IdentityProvider = des.IdentityProvider + } + + return nw +} + +func canonicalizeNewClusterWorkloadIdentityConfigSet(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterWorkloadIdentityConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterWorkloadIdentityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterWorkloadIdentityConfigSlice(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterWorkloadIdentityConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterFleet(des, initial *ClusterFleet, opts ...dcl.ApplyOption) *ClusterFleet { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterFleet{} + + if dcl.PartialSelfLinkToSelfLink(des.Project, initial.Project) || dcl.IsZeroValue(des.Project) { + cDes.Project = initial.Project + } else { + cDes.Project = des.Project + } + + return cDes +} + +func canonicalizeClusterFleetSlice(des, initial []ClusterFleet, opts ...dcl.ApplyOption) []ClusterFleet { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterFleet, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterFleet(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterFleet, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterFleet(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterFleet(c *Client, des, nw *ClusterFleet) *ClusterFleet { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterFleet while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.PartialSelfLinkToSelfLink(des.Project, nw.Project) { + nw.Project = des.Project + } + if dcl.StringCanonicalize(des.Membership, nw.Membership) { + nw.Membership = des.Membership + } + + return nw +} + +func canonicalizeNewClusterFleetSet(c *Client, des, nw []ClusterFleet) []ClusterFleet { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterFleet + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterFleetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterFleet(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterFleetSlice(c *Client, des, nw []ClusterFleet) []ClusterFleet { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterFleet + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterFleet(c, &d, &n)) +{{- if ne $.TargetVersionName "ga" }} + } + + return items +} + +func canonicalizeClusterLoggingConfig(des, initial *ClusterLoggingConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterLoggingConfig{} + + cDes.ComponentConfig = canonicalizeClusterLoggingConfigComponentConfig(des.ComponentConfig, initial.ComponentConfig, opts...) 
+ + return cDes +} + +func canonicalizeClusterLoggingConfigSlice(des, initial []ClusterLoggingConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfig(c *Client, des, nw *ClusterLoggingConfig) *ClusterLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ComponentConfig = canonicalizeNewClusterLoggingConfigComponentConfig(c, des.ComponentConfig, nw.ComponentConfig) + + return nw +} + +func canonicalizeNewClusterLoggingConfigSet(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewClusterLoggingConfigSlice(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterLoggingConfigComponentConfig(des, initial *ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfigComponentConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterLoggingConfigComponentConfig{} + + if dcl.IsZeroValue(des.EnableComponents) || (dcl.IsEmptyValueIndirect(des.EnableComponents) && dcl.IsEmptyValueIndirect(initial.EnableComponents)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.EnableComponents = initial.EnableComponents + } else { + cDes.EnableComponents = des.EnableComponents + } + + return cDes +} + +func canonicalizeClusterLoggingConfigComponentConfigSlice(des, initial []ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfigComponentConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfigComponentConfig(c *Client, des, nw *ClusterLoggingConfigComponentConfig) *ClusterLoggingConfigComponentConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfigComponentConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSet(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfigComponentConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigComponentConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSlice(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterLoggingConfigComponentConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterMonitoringConfig(des, initial *ClusterMonitoringConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterMonitoringConfig{} + + cDes.ManagedPrometheusConfig = canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des.ManagedPrometheusConfig, initial.ManagedPrometheusConfig, opts...) + + return cDes +} + +func canonicalizeClusterMonitoringConfigSlice(des, initial []ClusterMonitoringConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterMonitoringConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterMonitoringConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterMonitoringConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterMonitoringConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterMonitoringConfig(c *Client, des, nw *ClusterMonitoringConfig) *ClusterMonitoringConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.ManagedPrometheusConfig = canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, des.ManagedPrometheusConfig, nw.ManagedPrometheusConfig) + + return nw +} + +func canonicalizeNewClusterMonitoringConfigSet(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterMonitoringConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterMonitoringConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterMonitoringConfigSlice(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterMonitoringConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des, initial *ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterMonitoringConfigManagedPrometheusConfig{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + + return cDes +} + +func canonicalizeClusterMonitoringConfigManagedPrometheusConfigSlice(des, initial []ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfigManagedPrometheusConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c *Client, des, nw *ClusterMonitoringConfigManagedPrometheusConfig) *ClusterMonitoringConfigManagedPrometheusConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfigManagedPrometheusConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + + return nw +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSet(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterMonitoringConfigManagedPrometheusConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterMonitoringConfigManagedPrometheusConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &n)) +{{- end }} + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffCluster(c *Client, desired, actual *Cluster, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AzureRegion, actual.AzureRegion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AzureRegion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ResourceGroupId, actual.ResourceGroupId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceGroupId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Client, actual.Client, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AzureClient")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.AzureServicesAuthentication, actual.AzureServicesAuthentication, dcl.DiffInfo{ObjectFunction: compareClusterAzureServicesAuthenticationNewStyle, EmptyObject: EmptyClusterAzureServicesAuthentication, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AzureServicesAuthentication")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Networking, actual.Networking, dcl.DiffInfo{ObjectFunction: compareClusterNetworkingNewStyle, EmptyObject: EmptyClusterNetworking, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Networking")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ControlPlane, actual.ControlPlane, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneNewStyle, EmptyObject: EmptyClusterControlPlane, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ControlPlane")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Authorization, actual.Authorization, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationNewStyle, EmptyObject: EmptyClusterAuthorization, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Authorization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadIdentityConfig, actual.WorkloadIdentityConfig, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterWorkloadIdentityConfigNewStyle, EmptyObject: EmptyClusterWorkloadIdentityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadIdentityConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Fleet, actual.Fleet, dcl.DiffInfo{ObjectFunction: compareClusterFleetNewStyle, EmptyObject: EmptyClusterFleet, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fleet")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + +{{- if ne $.TargetVersionName "ga" }} + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigNewStyle, EmptyObject: EmptyClusterLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MonitoringConfig, actual.MonitoringConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoringConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + +{{- end }} + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareClusterAzureServicesAuthenticationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAzureServicesAuthentication) + if !ok { + desiredNotPointer, ok := d.(ClusterAzureServicesAuthentication) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAzureServicesAuthentication or *ClusterAzureServicesAuthentication", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAzureServicesAuthentication) + if !ok { + actualNotPointer, ok := a.(ClusterAzureServicesAuthentication) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAzureServicesAuthentication", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TenantId, actual.TenantId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("TenantId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ApplicationId, actual.ApplicationId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("ApplicationId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterNetworking) + if !ok { + desiredNotPointer, ok := d.(ClusterNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterNetworking or *ClusterNetworking", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterNetworking) + if !ok { + actualNotPointer, ok := a.(ClusterNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterNetworking", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.VirtualNetworkId, actual.VirtualNetworkId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VirtualNetworkId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodAddressCidrBlocks, actual.PodAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodAddressCidrBlocks")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAddressCidrBlocks, actual.ServiceAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAddressCidrBlocks")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlane) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlane) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlane or *ClusterControlPlane", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlane) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlane) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlane", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.VmSize, actual.VmSize, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("VmSize")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneSshConfigNewStyle, EmptyObject: EmptyClusterControlPlaneSshConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneRootVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MainVolume, actual.MainVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneMainVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneMainVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DatabaseEncryption, actual.DatabaseEncryption, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneDatabaseEncryptionNewStyle, EmptyObject: EmptyClusterControlPlaneDatabaseEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DatabaseEncryption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneProxyConfigNewStyle, EmptyObject: EmptyClusterControlPlaneProxyConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ReplicaPlacements, actual.ReplicaPlacements, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneReplicaPlacementsNewStyle, EmptyObject: EmptyClusterControlPlaneReplicaPlacements, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReplicaPlacements")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneSshConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig or *ClusterControlPlaneSshConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneSshConfig) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AuthorizedKey, actual.AuthorizedKey, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AuthorizedKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneRootVolume) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume or *ClusterControlPlaneRootVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneRootVolume) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneMainVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneMainVolume) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneMainVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume or *ClusterControlPlaneMainVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneMainVolume) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneMainVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneDatabaseEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneDatabaseEncryption) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneDatabaseEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption or *ClusterControlPlaneDatabaseEncryption", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneDatabaseEncryption) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneDatabaseEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KeyId, actual.KeyId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneProxyConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig or *ClusterControlPlaneProxyConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneProxyConfig) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ResourceGroupId, actual.ResourceGroupId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceGroupId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.SecretId, actual.SecretId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecretId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneReplicaPlacementsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneReplicaPlacements) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneReplicaPlacements) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneReplicaPlacements or *ClusterControlPlaneReplicaPlacements", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneReplicaPlacements) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneReplicaPlacements) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneReplicaPlacements", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AzureAvailabilityZone, actual.AzureAvailabilityZone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AzureAvailabilityZone")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterAuthorizationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorization) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorization or *ClusterAuthorization", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorization) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorization", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AdminUsers, actual.AdminUsers, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminUsersNewStyle, EmptyObject: EmptyClusterAuthorizationAdminUsers, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AdminUsers")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AdminGroups, actual.AdminGroups, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminGroupsNewStyle, EmptyObject: EmptyClusterAuthorizationAdminGroups, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AdminGroups")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterAuthorizationAdminUsersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminUsers) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers or *ClusterAuthorizationAdminUsers", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminUsers) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Username")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterAuthorizationAdminGroupsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminGroups) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups or *ClusterAuthorizationAdminGroups", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminGroups) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Group, actual.Group, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Group")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, 
ds...) + } + return diffs, nil +} + +func compareClusterWorkloadIdentityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterWorkloadIdentityConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig or *ClusterWorkloadIdentityConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterWorkloadIdentityConfig) + if !ok { + actualNotPointer, ok := a.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.IssuerUri, actual.IssuerUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IssuerUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadPool, actual.WorkloadPool, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadPool")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IdentityProvider, actual.IdentityProvider, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityProvider")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterFleetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterFleet) + if !ok { + desiredNotPointer, ok := d.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet or *ClusterFleet", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterFleet) + if !ok { + actualNotPointer, ok := a.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig or *ClusterLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ComponentConfig, actual.ComponentConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigComponentConfigNewStyle, EmptyObject: EmptyClusterLoggingConfigComponentConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComponentConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigComponentConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfigComponentConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig or *ClusterLoggingConfigComponentConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfigComponentConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableComponents, actual.EnableComponents, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("EnableComponents")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig or *ClusterMonitoringConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ManagedPrometheusConfig, actual.ManagedPrometheusConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigManagedPrometheusConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfigManagedPrometheusConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedPrometheusConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig or *ClusterMonitoringConfigManagedPrometheusConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Cluster) urlNormalized() *Cluster { + normalized := dcl.Copy(*r).(Cluster) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.AzureRegion = dcl.SelfLinkToName(r.AzureRegion) + normalized.ResourceGroupId = dcl.SelfLinkToName(r.ResourceGroupId) + normalized.Client = dcl.SelfLinkToName(r.Client) + normalized.Endpoint = dcl.SelfLinkToName(r.Endpoint) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Cluster) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAzureCluster" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Cluster resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Cluster) marshal(c *Client) ([]byte, error) { + m, err := expandCluster(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Cluster: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalCluster decodes JSON responses into the Cluster resource schema. 
+func unmarshalCluster(b []byte, c *Client, res *Cluster) (*Cluster, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapCluster(m, c, res) +} + +func unmarshalMapCluster(m map[string]interface{}, c *Client, res *Cluster) (*Cluster, error) { + + flattened := flattenCluster(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandCluster expands Cluster into a JSON request object. +func expandCluster(c *Client, f *Cluster) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/azureClusters/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.AzureRegion; dcl.ValueShouldBeSent(v) { + m["azureRegion"] = v + } + if v := f.ResourceGroupId; dcl.ValueShouldBeSent(v) { + m["resourceGroupId"] = v + } + if v := f.Client; dcl.ValueShouldBeSent(v) { + m["azureClient"] = v + } + if v, err := expandClusterAzureServicesAuthentication(c, f.AzureServicesAuthentication, res); err != nil { + return nil, fmt.Errorf("error expanding AzureServicesAuthentication into azureServicesAuthentication: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["azureServicesAuthentication"] = v + } + if v, err := expandClusterNetworking(c, f.Networking, res); err != nil { + return nil, fmt.Errorf("error expanding Networking into networking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["networking"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into 
controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["authorization"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := expandClusterFleet(c, f.Fleet, res); err != nil { + return nil, fmt.Errorf("error expanding Fleet into fleet: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["fleet"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["monitoringConfig"] = v +{{- end }} + } + + return m, nil +} + +// flattenCluster flattens Cluster from a JSON request object into the +// Cluster type. 
+func flattenCluster(c *Client, i interface{}, res *Cluster) *Cluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Cluster{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.AzureRegion = dcl.FlattenString(m["azureRegion"]) + resultRes.ResourceGroupId = dcl.FlattenString(m["resourceGroupId"]) + resultRes.Client = dcl.FlattenString(m["azureClient"]) + resultRes.AzureServicesAuthentication = flattenClusterAzureServicesAuthentication(c, m["azureServicesAuthentication"], res) + resultRes.Networking = flattenClusterNetworking(c, m["networking"], res) + resultRes.ControlPlane = flattenClusterControlPlane(c, m["controlPlane"], res) + resultRes.Authorization = flattenClusterAuthorization(c, m["authorization"], res) + resultRes.State = flattenClusterStateEnum(m["state"]) + resultRes.Endpoint = dcl.FlattenString(m["endpoint"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.WorkloadIdentityConfig = flattenClusterWorkloadIdentityConfig(c, m["workloadIdentityConfig"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Fleet = flattenClusterFleet(c, m["fleet"], res) +{{- if ne $.TargetVersionName "ga" }} + resultRes.LoggingConfig = flattenClusterLoggingConfig(c, m["loggingConfig"], res) + resultRes.MonitoringConfig = flattenClusterMonitoringConfig(c, m["monitoringConfig"], res) +{{- end }} + + return resultRes +} + +// expandClusterAzureServicesAuthenticationMap expands the contents of ClusterAzureServicesAuthentication into a JSON +// request object. 
+func expandClusterAzureServicesAuthenticationMap(c *Client, f map[string]ClusterAzureServicesAuthentication, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAzureServicesAuthentication(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAzureServicesAuthenticationSlice expands the contents of ClusterAzureServicesAuthentication into a JSON +// request object. +func expandClusterAzureServicesAuthenticationSlice(c *Client, f []ClusterAzureServicesAuthentication, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAzureServicesAuthentication(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAzureServicesAuthenticationMap flattens the contents of ClusterAzureServicesAuthentication from a JSON +// response object. +func flattenClusterAzureServicesAuthenticationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAzureServicesAuthentication { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAzureServicesAuthentication{} + } + + if len(a) == 0 { + return map[string]ClusterAzureServicesAuthentication{} + } + + items := make(map[string]ClusterAzureServicesAuthentication) + for k, item := range a { + items[k] = *flattenClusterAzureServicesAuthentication(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAzureServicesAuthenticationSlice flattens the contents of ClusterAzureServicesAuthentication from a JSON +// response object. 
+func flattenClusterAzureServicesAuthenticationSlice(c *Client, i interface{}, res *Cluster) []ClusterAzureServicesAuthentication { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAzureServicesAuthentication{} + } + + if len(a) == 0 { + return []ClusterAzureServicesAuthentication{} + } + + items := make([]ClusterAzureServicesAuthentication, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAzureServicesAuthentication(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAzureServicesAuthentication expands an instance of ClusterAzureServicesAuthentication into a JSON +// request object. +func expandClusterAzureServicesAuthentication(c *Client, f *ClusterAzureServicesAuthentication, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TenantId; !dcl.IsEmptyValueIndirect(v) { + m["tenantId"] = v + } + if v := f.ApplicationId; !dcl.IsEmptyValueIndirect(v) { + m["applicationId"] = v + } + + return m, nil +} + +// flattenClusterAzureServicesAuthentication flattens an instance of ClusterAzureServicesAuthentication from a JSON +// response object. +func flattenClusterAzureServicesAuthentication(c *Client, i interface{}, res *Cluster) *ClusterAzureServicesAuthentication { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAzureServicesAuthentication{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAzureServicesAuthentication + } + r.TenantId = dcl.FlattenString(m["tenantId"]) + r.ApplicationId = dcl.FlattenString(m["applicationId"]) + + return r +} + +// expandClusterNetworkingMap expands the contents of ClusterNetworking into a JSON +// request object. 
+func expandClusterNetworkingMap(c *Client, f map[string]ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterNetworkingSlice expands the contents of ClusterNetworking into a JSON +// request object. +func expandClusterNetworkingSlice(c *Client, f []ClusterNetworking, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterNetworkingMap flattens the contents of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworkingMap(c *Client, i interface{}, res *Cluster) map[string]ClusterNetworking { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterNetworking{} + } + + if len(a) == 0 { + return map[string]ClusterNetworking{} + } + + items := make(map[string]ClusterNetworking) + for k, item := range a { + items[k] = *flattenClusterNetworking(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterNetworkingSlice flattens the contents of ClusterNetworking from a JSON +// response object. 
+func flattenClusterNetworkingSlice(c *Client, i interface{}, res *Cluster) []ClusterNetworking { + a, ok := i.([]interface{}) + if !ok { + return []ClusterNetworking{} + } + + if len(a) == 0 { + return []ClusterNetworking{} + } + + items := make([]ClusterNetworking, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterNetworking(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterNetworking expands an instance of ClusterNetworking into a JSON +// request object. +func expandClusterNetworking(c *Client, f *ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.VirtualNetworkId; !dcl.IsEmptyValueIndirect(v) { + m["virtualNetworkId"] = v + } + if v := f.PodAddressCidrBlocks; v != nil { + m["podAddressCidrBlocks"] = v + } + if v := f.ServiceAddressCidrBlocks; v != nil { + m["serviceAddressCidrBlocks"] = v + } + + return m, nil +} + +// flattenClusterNetworking flattens an instance of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworking(c *Client, i interface{}, res *Cluster) *ClusterNetworking { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterNetworking{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterNetworking + } + r.VirtualNetworkId = dcl.FlattenString(m["virtualNetworkId"]) + r.PodAddressCidrBlocks = dcl.FlattenStringSlice(m["podAddressCidrBlocks"]) + r.ServiceAddressCidrBlocks = dcl.FlattenStringSlice(m["serviceAddressCidrBlocks"]) + + return r +} + +// expandClusterControlPlaneMap expands the contents of ClusterControlPlane into a JSON +// request object. 
+func expandClusterControlPlaneMap(c *Client, f map[string]ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneSlice expands the contents of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlaneSlice(c *Client, f []ClusterControlPlane, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneMap flattens the contents of ClusterControlPlane from a JSON +// response object. +func flattenClusterControlPlaneMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlane { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlane{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlane{} + } + + items := make(map[string]ClusterControlPlane) + for k, item := range a { + items[k] = *flattenClusterControlPlane(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneSlice flattens the contents of ClusterControlPlane from a JSON +// response object. 
+func flattenClusterControlPlaneSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlane { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlane{} + } + + if len(a) == 0 { + return []ClusterControlPlane{} + } + + items := make([]ClusterControlPlane, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlane(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlane expands an instance of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlane(c *Client, f *ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + m["version"] = v + } + if v := f.SubnetId; !dcl.IsEmptyValueIndirect(v) { + m["subnetId"] = v + } + if v := f.VmSize; !dcl.IsEmptyValueIndirect(v) { + m["vmSize"] = v + } + if v, err := expandClusterControlPlaneSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } + if v, err := expandClusterControlPlaneRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v, err := expandClusterControlPlaneMainVolume(c, f.MainVolume, res); err != nil { + return nil, fmt.Errorf("error expanding MainVolume into mainVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["mainVolume"] = v + } + if v, err := expandClusterControlPlaneDatabaseEncryption(c, f.DatabaseEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding DatabaseEncryption into databaseEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["databaseEncryption"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) 
{ + m["tags"] = v + } + if v, err := expandClusterControlPlaneProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v + } + if v, err := expandClusterControlPlaneReplicaPlacementsSlice(c, f.ReplicaPlacements, res); err != nil { + return nil, fmt.Errorf("error expanding ReplicaPlacements into replicaPlacements: %w", err) + } else if v != nil { + m["replicaPlacements"] = v + } + + return m, nil +} + +// flattenClusterControlPlane flattens an instance of ClusterControlPlane from a JSON +// response object. +func flattenClusterControlPlane(c *Client, i interface{}, res *Cluster) *ClusterControlPlane { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlane{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlane + } + r.Version = dcl.FlattenString(m["version"]) + r.SubnetId = dcl.FlattenString(m["subnetId"]) + r.VmSize = dcl.FlattenString(m["vmSize"]) + r.SshConfig = flattenClusterControlPlaneSshConfig(c, m["sshConfig"], res) + r.RootVolume = flattenClusterControlPlaneRootVolume(c, m["rootVolume"], res) + r.MainVolume = flattenClusterControlPlaneMainVolume(c, m["mainVolume"], res) + r.DatabaseEncryption = flattenClusterControlPlaneDatabaseEncryption(c, m["databaseEncryption"], res) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.ProxyConfig = flattenClusterControlPlaneProxyConfig(c, m["proxyConfig"], res) + r.ReplicaPlacements = flattenClusterControlPlaneReplicaPlacementsSlice(c, m["replicaPlacements"], res) + + return r +} + +// expandClusterControlPlaneSshConfigMap expands the contents of ClusterControlPlaneSshConfig into a JSON +// request object. 
+func expandClusterControlPlaneSshConfigMap(c *Client, f map[string]ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneSshConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneSshConfigSlice expands the contents of ClusterControlPlaneSshConfig into a JSON +// request object. +func expandClusterControlPlaneSshConfigSlice(c *Client, f []ClusterControlPlaneSshConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneSshConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneSshConfigMap flattens the contents of ClusterControlPlaneSshConfig from a JSON +// response object. +func flattenClusterControlPlaneSshConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneSshConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneSshConfig{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneSshConfig{} + } + + items := make(map[string]ClusterControlPlaneSshConfig) + for k, item := range a { + items[k] = *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneSshConfigSlice flattens the contents of ClusterControlPlaneSshConfig from a JSON +// response object. 
+func flattenClusterControlPlaneSshConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneSshConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneSshConfig{} + } + + if len(a) == 0 { + return []ClusterControlPlaneSshConfig{} + } + + items := make([]ClusterControlPlaneSshConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneSshConfig expands an instance of ClusterControlPlaneSshConfig into a JSON +// request object. +func expandClusterControlPlaneSshConfig(c *Client, f *ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AuthorizedKey; !dcl.IsEmptyValueIndirect(v) { + m["authorizedKey"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneSshConfig flattens an instance of ClusterControlPlaneSshConfig from a JSON +// response object. +func flattenClusterControlPlaneSshConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneSshConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneSshConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneSshConfig + } + r.AuthorizedKey = dcl.FlattenString(m["authorizedKey"]) + + return r +} + +// expandClusterControlPlaneRootVolumeMap expands the contents of ClusterControlPlaneRootVolume into a JSON +// request object. 
+func expandClusterControlPlaneRootVolumeMap(c *Client, f map[string]ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneRootVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneRootVolumeSlice expands the contents of ClusterControlPlaneRootVolume into a JSON +// request object. +func expandClusterControlPlaneRootVolumeSlice(c *Client, f []ClusterControlPlaneRootVolume, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneRootVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneRootVolumeMap flattens the contents of ClusterControlPlaneRootVolume from a JSON +// response object. +func flattenClusterControlPlaneRootVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneRootVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneRootVolume{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneRootVolume{} + } + + items := make(map[string]ClusterControlPlaneRootVolume) + for k, item := range a { + items[k] = *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneRootVolumeSlice flattens the contents of ClusterControlPlaneRootVolume from a JSON +// response object. 
+func flattenClusterControlPlaneRootVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneRootVolume { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneRootVolume{} + } + + if len(a) == 0 { + return []ClusterControlPlaneRootVolume{} + } + + items := make([]ClusterControlPlaneRootVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneRootVolume expands an instance of ClusterControlPlaneRootVolume into a JSON +// request object. +func expandClusterControlPlaneRootVolume(c *Client, f *ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneRootVolume flattens an instance of ClusterControlPlaneRootVolume from a JSON +// response object. +func flattenClusterControlPlaneRootVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneRootVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneRootVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneRootVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + + return r +} + +// expandClusterControlPlaneMainVolumeMap expands the contents of ClusterControlPlaneMainVolume into a JSON +// request object. 
+func expandClusterControlPlaneMainVolumeMap(c *Client, f map[string]ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneMainVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneMainVolumeSlice expands the contents of ClusterControlPlaneMainVolume into a JSON +// request object. +func expandClusterControlPlaneMainVolumeSlice(c *Client, f []ClusterControlPlaneMainVolume, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneMainVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneMainVolumeMap flattens the contents of ClusterControlPlaneMainVolume from a JSON +// response object. +func flattenClusterControlPlaneMainVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneMainVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneMainVolume{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneMainVolume{} + } + + items := make(map[string]ClusterControlPlaneMainVolume) + for k, item := range a { + items[k] = *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneMainVolumeSlice flattens the contents of ClusterControlPlaneMainVolume from a JSON +// response object. 
+func flattenClusterControlPlaneMainVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneMainVolume { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneMainVolume{} + } + + if len(a) == 0 { + return []ClusterControlPlaneMainVolume{} + } + + items := make([]ClusterControlPlaneMainVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneMainVolume expands an instance of ClusterControlPlaneMainVolume into a JSON +// request object. +func expandClusterControlPlaneMainVolume(c *Client, f *ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneMainVolume flattens an instance of ClusterControlPlaneMainVolume from a JSON +// response object. +func flattenClusterControlPlaneMainVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneMainVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneMainVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneMainVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + + return r +} + +// expandClusterControlPlaneDatabaseEncryptionMap expands the contents of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. 
+func expandClusterControlPlaneDatabaseEncryptionMap(c *Client, f map[string]ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneDatabaseEncryptionSlice expands the contents of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. +func expandClusterControlPlaneDatabaseEncryptionSlice(c *Client, f []ClusterControlPlaneDatabaseEncryption, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneDatabaseEncryptionMap flattens the contents of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. +func flattenClusterControlPlaneDatabaseEncryptionMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneDatabaseEncryption { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneDatabaseEncryption{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneDatabaseEncryption{} + } + + items := make(map[string]ClusterControlPlaneDatabaseEncryption) + for k, item := range a { + items[k] = *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneDatabaseEncryptionSlice flattens the contents of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. 
+func flattenClusterControlPlaneDatabaseEncryptionSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneDatabaseEncryption { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneDatabaseEncryption{} + } + + if len(a) == 0 { + return []ClusterControlPlaneDatabaseEncryption{} + } + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneDatabaseEncryption expands an instance of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. +func expandClusterControlPlaneDatabaseEncryption(c *Client, f *ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KeyId; !dcl.IsEmptyValueIndirect(v) { + m["keyId"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneDatabaseEncryption flattens an instance of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. +func flattenClusterControlPlaneDatabaseEncryption(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneDatabaseEncryption { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneDatabaseEncryption{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneDatabaseEncryption + } + r.KeyId = dcl.FlattenString(m["keyId"]) + + return r +} + +// expandClusterControlPlaneProxyConfigMap expands the contents of ClusterControlPlaneProxyConfig into a JSON +// request object. 
+func expandClusterControlPlaneProxyConfigMap(c *Client, f map[string]ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneProxyConfigSlice expands the contents of ClusterControlPlaneProxyConfig into a JSON +// request object. +func expandClusterControlPlaneProxyConfigSlice(c *Client, f []ClusterControlPlaneProxyConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneProxyConfigMap flattens the contents of ClusterControlPlaneProxyConfig from a JSON +// response object. +func flattenClusterControlPlaneProxyConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneProxyConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneProxyConfig{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneProxyConfig{} + } + + items := make(map[string]ClusterControlPlaneProxyConfig) + for k, item := range a { + items[k] = *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneProxyConfigSlice flattens the contents of ClusterControlPlaneProxyConfig from a JSON +// response object. 
+func flattenClusterControlPlaneProxyConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneProxyConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneProxyConfig{} + } + + if len(a) == 0 { + return []ClusterControlPlaneProxyConfig{} + } + + items := make([]ClusterControlPlaneProxyConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneProxyConfig expands an instance of ClusterControlPlaneProxyConfig into a JSON +// request object. +func expandClusterControlPlaneProxyConfig(c *Client, f *ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceGroupId; !dcl.IsEmptyValueIndirect(v) { + m["resourceGroupId"] = v + } + if v := f.SecretId; !dcl.IsEmptyValueIndirect(v) { + m["secretId"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneProxyConfig flattens an instance of ClusterControlPlaneProxyConfig from a JSON +// response object. +func flattenClusterControlPlaneProxyConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneProxyConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneProxyConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneProxyConfig + } + r.ResourceGroupId = dcl.FlattenString(m["resourceGroupId"]) + r.SecretId = dcl.FlattenString(m["secretId"]) + + return r +} + +// expandClusterControlPlaneReplicaPlacementsMap expands the contents of ClusterControlPlaneReplicaPlacements into a JSON +// request object. 
+func expandClusterControlPlaneReplicaPlacementsMap(c *Client, f map[string]ClusterControlPlaneReplicaPlacements, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneReplicaPlacements(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneReplicaPlacementsSlice expands the contents of ClusterControlPlaneReplicaPlacements into a JSON +// request object. +func expandClusterControlPlaneReplicaPlacementsSlice(c *Client, f []ClusterControlPlaneReplicaPlacements, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneReplicaPlacements(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneReplicaPlacementsMap flattens the contents of ClusterControlPlaneReplicaPlacements from a JSON +// response object. +func flattenClusterControlPlaneReplicaPlacementsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneReplicaPlacements { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneReplicaPlacements{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneReplicaPlacements{} + } + + items := make(map[string]ClusterControlPlaneReplicaPlacements) + for k, item := range a { + items[k] = *flattenClusterControlPlaneReplicaPlacements(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneReplicaPlacementsSlice flattens the contents of ClusterControlPlaneReplicaPlacements from a JSON +// response object. 
+func flattenClusterControlPlaneReplicaPlacementsSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneReplicaPlacements { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneReplicaPlacements{} + } + + if len(a) == 0 { + return []ClusterControlPlaneReplicaPlacements{} + } + + items := make([]ClusterControlPlaneReplicaPlacements, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneReplicaPlacements(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneReplicaPlacements expands an instance of ClusterControlPlaneReplicaPlacements into a JSON +// request object. +func expandClusterControlPlaneReplicaPlacements(c *Client, f *ClusterControlPlaneReplicaPlacements, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SubnetId; !dcl.IsEmptyValueIndirect(v) { + m["subnetId"] = v + } + if v := f.AzureAvailabilityZone; !dcl.IsEmptyValueIndirect(v) { + m["azureAvailabilityZone"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneReplicaPlacements flattens an instance of ClusterControlPlaneReplicaPlacements from a JSON +// response object. +func flattenClusterControlPlaneReplicaPlacements(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneReplicaPlacements { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneReplicaPlacements{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneReplicaPlacements + } + r.SubnetId = dcl.FlattenString(m["subnetId"]) + r.AzureAvailabilityZone = dcl.FlattenString(m["azureAvailabilityZone"]) + + return r +} + +// expandClusterAuthorizationMap expands the contents of ClusterAuthorization into a JSON +// request object. 
+func expandClusterAuthorizationMap(c *Client, f map[string]ClusterAuthorization, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorization(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationSlice expands the contents of ClusterAuthorization into a JSON +// request object. +func expandClusterAuthorizationSlice(c *Client, f []ClusterAuthorization, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorization(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationMap flattens the contents of ClusterAuthorization from a JSON +// response object. +func flattenClusterAuthorizationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorization { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorization{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorization{} + } + + items := make(map[string]ClusterAuthorization) + for k, item := range a { + items[k] = *flattenClusterAuthorization(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationSlice flattens the contents of ClusterAuthorization from a JSON +// response object. 
+func flattenClusterAuthorizationSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorization { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorization{} + } + + if len(a) == 0 { + return []ClusterAuthorization{} + } + + items := make([]ClusterAuthorization, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorization(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorization expands an instance of ClusterAuthorization into a JSON +// request object. +func expandClusterAuthorization(c *Client, f *ClusterAuthorization, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterAuthorizationAdminUsersSlice(c, f.AdminUsers, res); err != nil { + return nil, fmt.Errorf("error expanding AdminUsers into adminUsers: %w", err) + } else if v != nil { + m["adminUsers"] = v + } + if v, err := expandClusterAuthorizationAdminGroupsSlice(c, f.AdminGroups, res); err != nil { + return nil, fmt.Errorf("error expanding AdminGroups into adminGroups: %w", err) + } else if v != nil { + m["adminGroups"] = v + } + + return m, nil +} + +// flattenClusterAuthorization flattens an instance of ClusterAuthorization from a JSON +// response object. +func flattenClusterAuthorization(c *Client, i interface{}, res *Cluster) *ClusterAuthorization { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorization{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorization + } + r.AdminUsers = flattenClusterAuthorizationAdminUsersSlice(c, m["adminUsers"], res) + r.AdminGroups = flattenClusterAuthorizationAdminGroupsSlice(c, m["adminGroups"], res) + + return r +} + +// expandClusterAuthorizationAdminUsersMap expands the contents of ClusterAuthorizationAdminUsers into a JSON +// request object. 
+func expandClusterAuthorizationAdminUsersMap(c *Client, f map[string]ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorizationAdminUsers(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationAdminUsersSlice expands the contents of ClusterAuthorizationAdminUsers into a JSON +// request object. +func expandClusterAuthorizationAdminUsersSlice(c *Client, f []ClusterAuthorizationAdminUsers, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorizationAdminUsers(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationAdminUsersMap flattens the contents of ClusterAuthorizationAdminUsers from a JSON +// response object. +func flattenClusterAuthorizationAdminUsersMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminUsers { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorizationAdminUsers{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorizationAdminUsers{} + } + + items := make(map[string]ClusterAuthorizationAdminUsers) + for k, item := range a { + items[k] = *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationAdminUsersSlice flattens the contents of ClusterAuthorizationAdminUsers from a JSON +// response object. 
+func flattenClusterAuthorizationAdminUsersSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminUsers { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorizationAdminUsers{} + } + + if len(a) == 0 { + return []ClusterAuthorizationAdminUsers{} + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorizationAdminUsers expands an instance of ClusterAuthorizationAdminUsers into a JSON +// request object. +func expandClusterAuthorizationAdminUsers(c *Client, f *ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Username; !dcl.IsEmptyValueIndirect(v) { + m["username"] = v + } + + return m, nil +} + +// flattenClusterAuthorizationAdminUsers flattens an instance of ClusterAuthorizationAdminUsers from a JSON +// response object. +func flattenClusterAuthorizationAdminUsers(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminUsers { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorizationAdminUsers{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorizationAdminUsers + } + r.Username = dcl.FlattenString(m["username"]) + + return r +} + +// expandClusterAuthorizationAdminGroupsMap expands the contents of ClusterAuthorizationAdminGroups into a JSON +// request object. 
+func expandClusterAuthorizationAdminGroupsMap(c *Client, f map[string]ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorizationAdminGroups(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationAdminGroupsSlice expands the contents of ClusterAuthorizationAdminGroups into a JSON +// request object. +func expandClusterAuthorizationAdminGroupsSlice(c *Client, f []ClusterAuthorizationAdminGroups, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorizationAdminGroups(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationAdminGroupsMap flattens the contents of ClusterAuthorizationAdminGroups from a JSON +// response object. +func flattenClusterAuthorizationAdminGroupsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminGroups { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorizationAdminGroups{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorizationAdminGroups{} + } + + items := make(map[string]ClusterAuthorizationAdminGroups) + for k, item := range a { + items[k] = *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationAdminGroupsSlice flattens the contents of ClusterAuthorizationAdminGroups from a JSON +// response object. 
+func flattenClusterAuthorizationAdminGroupsSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminGroups { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorizationAdminGroups{} + } + + if len(a) == 0 { + return []ClusterAuthorizationAdminGroups{} + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorizationAdminGroups expands an instance of ClusterAuthorizationAdminGroups into a JSON +// request object. +func expandClusterAuthorizationAdminGroups(c *Client, f *ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Group; !dcl.IsEmptyValueIndirect(v) { + m["group"] = v + } + + return m, nil +} + +// flattenClusterAuthorizationAdminGroups flattens an instance of ClusterAuthorizationAdminGroups from a JSON +// response object. +func flattenClusterAuthorizationAdminGroups(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminGroups { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorizationAdminGroups{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorizationAdminGroups + } + r.Group = dcl.FlattenString(m["group"]) + + return r +} + +// expandClusterWorkloadIdentityConfigMap expands the contents of ClusterWorkloadIdentityConfig into a JSON +// request object. 
+func expandClusterWorkloadIdentityConfigMap(c *Client, f map[string]ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterWorkloadIdentityConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterWorkloadIdentityConfigSlice expands the contents of ClusterWorkloadIdentityConfig into a JSON +// request object. +func expandClusterWorkloadIdentityConfigSlice(c *Client, f []ClusterWorkloadIdentityConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterWorkloadIdentityConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterWorkloadIdentityConfigMap flattens the contents of ClusterWorkloadIdentityConfig from a JSON +// response object. +func flattenClusterWorkloadIdentityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterWorkloadIdentityConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterWorkloadIdentityConfig{} + } + + if len(a) == 0 { + return map[string]ClusterWorkloadIdentityConfig{} + } + + items := make(map[string]ClusterWorkloadIdentityConfig) + for k, item := range a { + items[k] = *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterWorkloadIdentityConfigSlice flattens the contents of ClusterWorkloadIdentityConfig from a JSON +// response object. 
+func flattenClusterWorkloadIdentityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterWorkloadIdentityConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterWorkloadIdentityConfig{} + } + + if len(a) == 0 { + return []ClusterWorkloadIdentityConfig{} + } + + items := make([]ClusterWorkloadIdentityConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterWorkloadIdentityConfig expands an instance of ClusterWorkloadIdentityConfig into a JSON +// request object. +func expandClusterWorkloadIdentityConfig(c *Client, f *ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.IssuerUri; !dcl.IsEmptyValueIndirect(v) { + m["issuerUri"] = v + } + if v := f.WorkloadPool; !dcl.IsEmptyValueIndirect(v) { + m["workloadPool"] = v + } + if v := f.IdentityProvider; !dcl.IsEmptyValueIndirect(v) { + m["identityProvider"] = v + } + + return m, nil +} + +// flattenClusterWorkloadIdentityConfig flattens an instance of ClusterWorkloadIdentityConfig from a JSON +// response object. +func flattenClusterWorkloadIdentityConfig(c *Client, i interface{}, res *Cluster) *ClusterWorkloadIdentityConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterWorkloadIdentityConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterWorkloadIdentityConfig + } + r.IssuerUri = dcl.FlattenString(m["issuerUri"]) + r.WorkloadPool = dcl.FlattenString(m["workloadPool"]) + r.IdentityProvider = dcl.FlattenString(m["identityProvider"]) + + return r +} + +// expandClusterFleetMap expands the contents of ClusterFleet into a JSON +// request object. 
+func expandClusterFleetMap(c *Client, f map[string]ClusterFleet, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterFleet(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterFleetSlice expands the contents of ClusterFleet into a JSON +// request object. +func expandClusterFleetSlice(c *Client, f []ClusterFleet, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterFleet(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterFleetMap flattens the contents of ClusterFleet from a JSON +// response object. +func flattenClusterFleetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterFleet { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterFleet{} + } + + if len(a) == 0 { + return map[string]ClusterFleet{} + } + + items := make(map[string]ClusterFleet) + for k, item := range a { + items[k] = *flattenClusterFleet(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterFleetSlice flattens the contents of ClusterFleet from a JSON +// response object. +func flattenClusterFleetSlice(c *Client, i interface{}, res *Cluster) []ClusterFleet { + a, ok := i.([]interface{}) + if !ok { + return []ClusterFleet{} + } + + if len(a) == 0 { + return []ClusterFleet{} + } + + items := make([]ClusterFleet, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterFleet(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterFleet expands an instance of ClusterFleet into a JSON +// request object. 
+func expandClusterFleet(c *Client, f *ClusterFleet, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := dcl.DeriveField("projects/%s", f.Project, dcl.SelfLinkToName(f.Project)); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenClusterFleet flattens an instance of ClusterFleet from a JSON +// response object. +func flattenClusterFleet(c *Client, i interface{}, res *Cluster) *ClusterFleet { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterFleet{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterFleet + } + r.Project = dcl.FlattenString(m["project"]) + r.Membership = dcl.FlattenString(m["membership"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandClusterLoggingConfigMap expands the contents of ClusterLoggingConfig into a JSON +// request object. +func expandClusterLoggingConfigMap(c *Client, f map[string]ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterLoggingConfigSlice expands the contents of ClusterLoggingConfig into a JSON +// request object. 
+func expandClusterLoggingConfigSlice(c *Client, f []ClusterLoggingConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterLoggingConfigMap flattens the contents of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfig{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfig{} + } + + items := make(map[string]ClusterLoggingConfig) + for k, item := range a { + items[k] = *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterLoggingConfigSlice flattens the contents of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfig{} + } + + if len(a) == 0 { + return []ClusterLoggingConfig{} + } + + items := make([]ClusterLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterLoggingConfig expands an instance of ClusterLoggingConfig into a JSON +// request object. 
+func expandClusterLoggingConfig(c *Client, f *ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterLoggingConfigComponentConfig(c, f.ComponentConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ComponentConfig into componentConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["componentConfig"] = v + } + + return m, nil +} + +// flattenClusterLoggingConfig flattens an instance of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterLoggingConfig + } + r.ComponentConfig = flattenClusterLoggingConfigComponentConfig(c, m["componentConfig"], res) + + return r +} + +// expandClusterLoggingConfigComponentConfigMap expands the contents of ClusterLoggingConfigComponentConfig into a JSON +// request object. +func expandClusterLoggingConfigComponentConfigMap(c *Client, f map[string]ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterLoggingConfigComponentConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterLoggingConfigComponentConfigSlice expands the contents of ClusterLoggingConfigComponentConfig into a JSON +// request object. 
+func expandClusterLoggingConfigComponentConfigSlice(c *Client, f []ClusterLoggingConfigComponentConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterLoggingConfigComponentConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterLoggingConfigComponentConfigMap flattens the contents of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfigComponentConfig{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfigComponentConfig{} + } + + items := make(map[string]ClusterLoggingConfigComponentConfig) + for k, item := range a { + items[k] = *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigSlice flattens the contents of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfigComponentConfig{} + } + + if len(a) == 0 { + return []ClusterLoggingConfigComponentConfig{} + } + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterLoggingConfigComponentConfig expands an instance of ClusterLoggingConfigComponentConfig into a JSON +// request object. 
+func expandClusterLoggingConfigComponentConfig(c *Client, f *ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableComponents; v != nil { + m["enableComponents"] = v + } + + return m, nil +} + +// flattenClusterLoggingConfigComponentConfig flattens an instance of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfigComponentConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterLoggingConfigComponentConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterLoggingConfigComponentConfig + } + r.EnableComponents = flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c, m["enableComponents"], res) + + return r +} + +// expandClusterMonitoringConfigMap expands the contents of ClusterMonitoringConfig into a JSON +// request object. +func expandClusterMonitoringConfigMap(c *Client, f map[string]ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterMonitoringConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterMonitoringConfigSlice expands the contents of ClusterMonitoringConfig into a JSON +// request object. 
+func expandClusterMonitoringConfigSlice(c *Client, f []ClusterMonitoringConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterMonitoringConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterMonitoringConfigMap flattens the contents of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterMonitoringConfig{} + } + + if len(a) == 0 { + return map[string]ClusterMonitoringConfig{} + } + + items := make(map[string]ClusterMonitoringConfig) + for k, item := range a { + items[k] = *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterMonitoringConfigSlice flattens the contents of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterMonitoringConfig{} + } + + if len(a) == 0 { + return []ClusterMonitoringConfig{} + } + + items := make([]ClusterMonitoringConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterMonitoringConfig expands an instance of ClusterMonitoringConfig into a JSON +// request object. 
+func expandClusterMonitoringConfig(c *Client, f *ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, f.ManagedPrometheusConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ManagedPrometheusConfig into managedPrometheusConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["managedPrometheusConfig"] = v + } + + return m, nil +} + +// flattenClusterMonitoringConfig flattens an instance of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterMonitoringConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterMonitoringConfig + } + r.ManagedPrometheusConfig = flattenClusterMonitoringConfigManagedPrometheusConfig(c, m["managedPrometheusConfig"], res) + + return r +} + +// expandClusterMonitoringConfigManagedPrometheusConfigMap expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. +func expandClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, f map[string]ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterMonitoringConfigManagedPrometheusConfigSlice expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. 
+func expandClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, f []ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterMonitoringConfigManagedPrometheusConfigMap flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. +func flattenClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfigManagedPrometheusConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterMonitoringConfigManagedPrometheusConfig{} + } + + if len(a) == 0 { + return map[string]ClusterMonitoringConfigManagedPrometheusConfig{} + } + + items := make(map[string]ClusterMonitoringConfigManagedPrometheusConfig) + for k, item := range a { + items[k] = *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterMonitoringConfigManagedPrometheusConfigSlice flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. 
+func flattenClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfigManagedPrometheusConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterMonitoringConfigManagedPrometheusConfig{} + } + + if len(a) == 0 { + return []ClusterMonitoringConfigManagedPrometheusConfig{} + } + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterMonitoringConfigManagedPrometheusConfig expands an instance of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. +func expandClusterMonitoringConfigManagedPrometheusConfig(c *Client, f *ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + + return m, nil +} + +// flattenClusterMonitoringConfigManagedPrometheusConfig flattens an instance of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. +func flattenClusterMonitoringConfigManagedPrometheusConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfigManagedPrometheusConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterMonitoringConfigManagedPrometheusConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterMonitoringConfigManagedPrometheusConfig + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + + return r +} + +{{- end }} +// flattenClusterStateEnumMap flattens the contents of ClusterStateEnum from a JSON +// response object. 
+func flattenClusterStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterStateEnum{} + } + + if len(a) == 0 { + return map[string]ClusterStateEnum{} + } + + items := make(map[string]ClusterStateEnum) + for k, item := range a { + items[k] = *flattenClusterStateEnum(item.(interface{})) + } + + return items +} + +// flattenClusterStateEnumSlice flattens the contents of ClusterStateEnum from a JSON +// response object. +func flattenClusterStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterStateEnum{} + } + + if len(a) == 0 { + return []ClusterStateEnum{} + } + + items := make([]ClusterStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterStateEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterStateEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterStateEnum with the same value as that string. +func flattenClusterStateEnum(i interface{}) *ClusterStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterStateEnumRef(s) +{{- if ne $.TargetVersionName "ga" }} +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON +// response object. 
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + items := make(map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum) + for k, item := range a { + items[k] = *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{})) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfigEnableComponentsEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + if len(a) == 0 { + return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + items := make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the same value as that string. 
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(i interface{}) *ClusterLoggingConfigComponentConfigEnableComponentsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s) +{{- end }} +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Cluster) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalCluster(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type clusterDiff struct { + // The diff should include one or the other of RequiresRecreate or 
UpdateOp. + RequiresRecreate bool + UpdateOp clusterApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToClusterDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clusterDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []clusterDiff + // For each operation name, create a clusterDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := clusterDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToClusterApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToClusterApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clusterApiOperation, error) { + switch opName { + + case "updateClusterUpdateAzureClusterOperation": + return &updateClusterUpdateAzureClusterOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractClusterFields(r *Cluster) error { + vAzureServicesAuthentication := r.AzureServicesAuthentication + if vAzureServicesAuthentication == nil { + // note: explicitly not the empty object. 
+ vAzureServicesAuthentication = &ClusterAzureServicesAuthentication{} + } + if err := extractClusterAzureServicesAuthenticationFields(r, vAzureServicesAuthentication); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAzureServicesAuthentication) { + r.AzureServicesAuthentication = vAzureServicesAuthentication + } + vNetworking := r.Networking + if vNetworking == nil { + // note: explicitly not the empty object. + vNetworking = &ClusterNetworking{} + } + if err := extractClusterNetworkingFields(r, vNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworking) { + r.Networking = vNetworking + } + vControlPlane := r.ControlPlane + if vControlPlane == nil { + // note: explicitly not the empty object. + vControlPlane = &ClusterControlPlane{} + } + if err := extractClusterControlPlaneFields(r, vControlPlane); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vControlPlane) { + r.ControlPlane = vControlPlane + } + vAuthorization := r.Authorization + if vAuthorization == nil { + // note: explicitly not the empty object. + vAuthorization = &ClusterAuthorization{} + } + if err := extractClusterAuthorizationFields(r, vAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorization) { + r.Authorization = vAuthorization + } + vWorkloadIdentityConfig := r.WorkloadIdentityConfig + if vWorkloadIdentityConfig == nil { + // note: explicitly not the empty object. + vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{} + } + if err := extractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) { + r.WorkloadIdentityConfig = vWorkloadIdentityConfig + } + vFleet := r.Fleet + if vFleet == nil { + // note: explicitly not the empty object. 
+ vFleet = &ClusterFleet{} + } + if err := extractClusterFleetFields(r, vFleet); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleet) { + r.Fleet = vFleet + } +{{- if ne $.TargetVersionName "ga" }} + vLoggingConfig := r.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &ClusterLoggingConfig{} + } + if err := extractClusterLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + r.LoggingConfig = vLoggingConfig + } + vMonitoringConfig := r.MonitoringConfig + if vMonitoringConfig == nil { + // note: explicitly not the empty object. + vMonitoringConfig = &ClusterMonitoringConfig{} + } + if err := extractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoringConfig) { + r.MonitoringConfig = vMonitoringConfig + } +{{- end }} + return nil +} +func extractClusterAzureServicesAuthenticationFields(r *Cluster, o *ClusterAzureServicesAuthentication) error { + return nil +} +func extractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error { + return nil +} +func extractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &ClusterControlPlaneSshConfig{} + } + if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. 
+ vRootVolume = &ClusterControlPlaneRootVolume{} + } + if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vMainVolume := o.MainVolume + if vMainVolume == nil { + // note: explicitly not the empty object. + vMainVolume = &ClusterControlPlaneMainVolume{} + } + if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMainVolume) { + o.MainVolume = vMainVolume + } + vDatabaseEncryption := o.DatabaseEncryption + if vDatabaseEncryption == nil { + // note: explicitly not the empty object. + vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{} + } + if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) { + o.DatabaseEncryption = vDatabaseEncryption + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. 
+ vProxyConfig = &ClusterControlPlaneProxyConfig{} + } + if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } + return nil +} +func extractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error { + return nil +} +func extractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error { + return nil +} +func extractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error { + return nil +} +func extractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error { + return nil +} +func extractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error { + return nil +} +func extractClusterControlPlaneReplicaPlacementsFields(r *Cluster, o *ClusterControlPlaneReplicaPlacements) error { + return nil +} +func extractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error { + return nil +} +func extractClusterAuthorizationAdminUsersFields(r *Cluster, o *ClusterAuthorizationAdminUsers) error { + return nil +} +func extractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error { + return nil +} +func extractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error { + return nil +} +func extractClusterFleetFields(r *Cluster, o *ClusterFleet) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func extractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error { + vComponentConfig := o.ComponentConfig + if vComponentConfig == nil { + // note: explicitly not the empty object. 
+ vComponentConfig = &ClusterLoggingConfigComponentConfig{} + } + if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComponentConfig) { + o.ComponentConfig = vComponentConfig + } + return nil +} +func extractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error { + return nil +} +func extractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error { + vManagedPrometheusConfig := o.ManagedPrometheusConfig + if vManagedPrometheusConfig == nil { + // note: explicitly not the empty object. + vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{} + } + if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) { + o.ManagedPrometheusConfig = vManagedPrometheusConfig + } + return nil +} +func extractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error { + return nil +} +{{- end }} + +func postReadExtractClusterFields(r *Cluster) error { + vAzureServicesAuthentication := r.AzureServicesAuthentication + if vAzureServicesAuthentication == nil { + // note: explicitly not the empty object. + vAzureServicesAuthentication = &ClusterAzureServicesAuthentication{} + } + if err := postReadExtractClusterAzureServicesAuthenticationFields(r, vAzureServicesAuthentication); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAzureServicesAuthentication) { + r.AzureServicesAuthentication = vAzureServicesAuthentication + } + vNetworking := r.Networking + if vNetworking == nil { + // note: explicitly not the empty object. 
+ vNetworking = &ClusterNetworking{} + } + if err := postReadExtractClusterNetworkingFields(r, vNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworking) { + r.Networking = vNetworking + } + vControlPlane := r.ControlPlane + if vControlPlane == nil { + // note: explicitly not the empty object. + vControlPlane = &ClusterControlPlane{} + } + if err := postReadExtractClusterControlPlaneFields(r, vControlPlane); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vControlPlane) { + r.ControlPlane = vControlPlane + } + vAuthorization := r.Authorization + if vAuthorization == nil { + // note: explicitly not the empty object. + vAuthorization = &ClusterAuthorization{} + } + if err := postReadExtractClusterAuthorizationFields(r, vAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorization) { + r.Authorization = vAuthorization + } + vWorkloadIdentityConfig := r.WorkloadIdentityConfig + if vWorkloadIdentityConfig == nil { + // note: explicitly not the empty object. + vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{} + } + if err := postReadExtractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) { + r.WorkloadIdentityConfig = vWorkloadIdentityConfig + } + vFleet := r.Fleet + if vFleet == nil { + // note: explicitly not the empty object. + vFleet = &ClusterFleet{} + } + if err := postReadExtractClusterFleetFields(r, vFleet); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleet) { + r.Fleet = vFleet + } +{{- if ne $.TargetVersionName "ga" }} + vLoggingConfig := r.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &ClusterLoggingConfig{} + } + if err := postReadExtractClusterLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + r.LoggingConfig = vLoggingConfig + } + vMonitoringConfig := r.MonitoringConfig + if vMonitoringConfig == nil { + // note: explicitly not the empty object. + vMonitoringConfig = &ClusterMonitoringConfig{} + } + if err := postReadExtractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoringConfig) { + r.MonitoringConfig = vMonitoringConfig + } +{{- end }} + return nil +} +func postReadExtractClusterAzureServicesAuthenticationFields(r *Cluster, o *ClusterAzureServicesAuthentication) error { + return nil +} +func postReadExtractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error { + return nil +} +func postReadExtractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &ClusterControlPlaneSshConfig{} + } + if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &ClusterControlPlaneRootVolume{} + } + if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vMainVolume := o.MainVolume + if vMainVolume == nil { + // note: explicitly not the empty object. 
+ vMainVolume = &ClusterControlPlaneMainVolume{} + } + if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMainVolume) { + o.MainVolume = vMainVolume + } + vDatabaseEncryption := o.DatabaseEncryption + if vDatabaseEncryption == nil { + // note: explicitly not the empty object. + vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{} + } + if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) { + o.DatabaseEncryption = vDatabaseEncryption + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. + vProxyConfig = &ClusterControlPlaneProxyConfig{} + } + if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } + return nil +} +func postReadExtractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error { + return nil +} +func postReadExtractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error { + return nil +} +func postReadExtractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error { + return nil +} +func postReadExtractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error { + return nil +} +func postReadExtractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error { + return nil +} +func postReadExtractClusterControlPlaneReplicaPlacementsFields(r *Cluster, o *ClusterControlPlaneReplicaPlacements) error { + return nil +} +func postReadExtractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error { + return nil +} +func postReadExtractClusterAuthorizationAdminUsersFields(r *Cluster, o 
*ClusterAuthorizationAdminUsers) error { + return nil +} +func postReadExtractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error { + return nil +} +func postReadExtractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error { + return nil +} +func postReadExtractClusterFleetFields(r *Cluster, o *ClusterFleet) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error { + vComponentConfig := o.ComponentConfig + if vComponentConfig == nil { + // note: explicitly not the empty object. + vComponentConfig = &ClusterLoggingConfigComponentConfig{} + } + if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComponentConfig) { + o.ComponentConfig = vComponentConfig + } + return nil +} +func postReadExtractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error { + return nil +} +func postReadExtractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error { + vManagedPrometheusConfig := o.ManagedPrometheusConfig + if vManagedPrometheusConfig == nil { + // note: explicitly not the empty object. 
+ vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{} + } + if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) { + o.ManagedPrometheusConfig = vManagedPrometheusConfig + } + return nil +} +func postReadExtractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error { + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl b/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl new file mode 100644 index 000000000000..57ee941b9e3d --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl @@ -0,0 +1,767 @@ +package containerazure + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type NodePool struct { + Name *string `json:"name"` + Version *string `json:"version"` + Config *NodePoolConfig `json:"config"` + SubnetId *string `json:"subnetId"` + Autoscaling *NodePoolAutoscaling `json:"autoscaling"` + State *NodePoolStateEnum `json:"state"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + MaxPodsConstraint *NodePoolMaxPodsConstraint `json:"maxPodsConstraint"` + Management *NodePoolManagement `json:"management"` + AzureAvailabilityZone *string `json:"azureAvailabilityZone"` + Project *string `json:"project"` + Location *string `json:"location"` + Cluster *string `json:"cluster"` +} + +func (r *NodePool) String() string { + return dcl.SprintResource(r) +} + +// The enum NodePoolStateEnum. 
+type NodePoolStateEnum string + +// NodePoolStateEnumRef returns a *NodePoolStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolStateEnumRef(s string) *NodePoolStateEnum { + v := NodePoolStateEnum(s) + return &v +} + +func (v NodePoolStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +type NodePoolConfig struct { + empty bool `json:"-"` + VmSize *string `json:"vmSize"` + RootVolume *NodePoolConfigRootVolume `json:"rootVolume"` + Tags map[string]string `json:"tags"` + Labels map[string]string `json:"labels"` + SshConfig *NodePoolConfigSshConfig `json:"sshConfig"` +{{- if ne $.TargetVersionName "ga" }} + ImageType *string `json:"imageType"` +{{- end }} + ProxyConfig *NodePoolConfigProxyConfig `json:"proxyConfig"` +} + +type jsonNodePoolConfig NodePoolConfig + +func (r *NodePoolConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfig + } else { + + r.VmSize = res.VmSize + + r.RootVolume = res.RootVolume + + r.Tags = res.Tags + + r.Labels = res.Labels + + r.SshConfig = res.SshConfig +{{- if ne $.TargetVersionName "ga" }} + + r.ImageType = res.ImageType +{{- end }} + + r.ProxyConfig = res.ProxyConfig + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfig *NodePoolConfig = &NodePoolConfig{empty: true} + +func (r *NodePoolConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` +} + +type jsonNodePoolConfigRootVolume NodePoolConfigRootVolume + +func (r *NodePoolConfigRootVolume) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigRootVolume + } else { + + r.SizeGib = res.SizeGib + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigRootVolume *NodePoolConfigRootVolume = &NodePoolConfigRootVolume{empty: true} + +func (r *NodePoolConfigRootVolume) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigSshConfig struct { + empty bool `json:"-"` + AuthorizedKey *string `json:"authorizedKey"` +} + +type jsonNodePoolConfigSshConfig NodePoolConfigSshConfig + +func (r *NodePoolConfigSshConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigSshConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigSshConfig + } else { + + r.AuthorizedKey = res.AuthorizedKey + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigSshConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigSshConfig *NodePoolConfigSshConfig = &NodePoolConfigSshConfig{empty: true} + +func (r *NodePoolConfigSshConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigProxyConfig struct { + empty bool `json:"-"` + ResourceGroupId *string `json:"resourceGroupId"` + SecretId *string `json:"secretId"` +} + +type jsonNodePoolConfigProxyConfig NodePoolConfigProxyConfig + +func (r *NodePoolConfigProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigProxyConfig + } else { + + r.ResourceGroupId = res.ResourceGroupId + + r.SecretId = res.SecretId + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigProxyConfig *NodePoolConfigProxyConfig = &NodePoolConfigProxyConfig{empty: true} + +func (r *NodePoolConfigProxyConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigProxyConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolAutoscaling struct { + empty bool `json:"-"` + MinNodeCount *int64 `json:"minNodeCount"` + MaxNodeCount *int64 `json:"maxNodeCount"` +} + +type jsonNodePoolAutoscaling NodePoolAutoscaling + +func (r *NodePoolAutoscaling) UnmarshalJSON(data []byte) error { + var res jsonNodePoolAutoscaling + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolAutoscaling + } else { + + r.MinNodeCount = res.MinNodeCount + + r.MaxNodeCount = res.MaxNodeCount + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolAutoscaling is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolAutoscaling *NodePoolAutoscaling = &NodePoolAutoscaling{empty: true} + +func (r *NodePoolAutoscaling) Empty() bool { + return r.empty +} + +func (r *NodePoolAutoscaling) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolAutoscaling) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolMaxPodsConstraint struct { + empty bool `json:"-"` + MaxPodsPerNode *int64 `json:"maxPodsPerNode"` +} + +type jsonNodePoolMaxPodsConstraint NodePoolMaxPodsConstraint + +func (r *NodePoolMaxPodsConstraint) UnmarshalJSON(data []byte) error { + var res jsonNodePoolMaxPodsConstraint + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolMaxPodsConstraint + } else { + + r.MaxPodsPerNode = res.MaxPodsPerNode + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolMaxPodsConstraint is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolMaxPodsConstraint *NodePoolMaxPodsConstraint = &NodePoolMaxPodsConstraint{empty: true} + +func (r *NodePoolMaxPodsConstraint) Empty() bool { + return r.empty +} + +func (r *NodePoolMaxPodsConstraint) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolMaxPodsConstraint) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolManagement struct { + empty bool `json:"-"` + AutoRepair *bool `json:"autoRepair"` +} + +type jsonNodePoolManagement NodePoolManagement + +func (r *NodePoolManagement) UnmarshalJSON(data []byte) error { + var res jsonNodePoolManagement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolManagement + } else { + + r.AutoRepair = res.AutoRepair + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolManagement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyNodePoolManagement *NodePoolManagement = &NodePoolManagement{empty: true} + +func (r *NodePoolManagement) Empty() bool { + return r.empty +} + +func (r *NodePoolManagement) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolManagement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *NodePool) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_azure", + Type: "NodePool", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containerazure", +{{- end }} + } +} + +func (r *NodePool) ID() (string, error) { + if err := extractNodePoolFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "version": dcl.ValueOrEmptyString(nr.Version), + "config": dcl.ValueOrEmptyString(nr.Config), + "subnet_id": dcl.ValueOrEmptyString(nr.SubnetId), + "autoscaling": dcl.ValueOrEmptyString(nr.Autoscaling), + "state": dcl.ValueOrEmptyString(nr.State), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "max_pods_constraint": dcl.ValueOrEmptyString(nr.MaxPodsConstraint), + "management": dcl.ValueOrEmptyString(nr.Management), + "azure_availability_zone": dcl.ValueOrEmptyString(nr.AzureAvailabilityZone), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const NodePoolMaxPage = -1 + +type NodePoolList struct { + Items []*NodePool + + nextToken string + + pageSize int32 + + resource *NodePool +} + +func (l *NodePoolList) HasNext() bool { + return l.nextToken != "" +} + +func (l *NodePoolList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() 
{ + return fmt.Errorf("no next page") + } + items, token, err := c.listNodePool(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListNodePool(ctx context.Context, project, location, cluster string) (*NodePoolList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListNodePoolWithMaxResults(ctx, project, location, cluster, NodePoolMaxPage) + +} + +func (c *Client) ListNodePoolWithMaxResults(ctx context.Context, project, location, cluster string, pageSize int32) (*NodePoolList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &NodePool{ + Project: &project, + Location: &location, + Cluster: &cluster, + } + items, token, err := c.listNodePool(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &NodePoolList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetNodePool(ctx context.Context, r *NodePool) (*NodePool, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractNodePoolFields(r) + + b, err := c.getNodePoolRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalNodePool(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Cluster = r.Cluster + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeNodePoolNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractNodePoolFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteNodePool(ctx context.Context, r *NodePool) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("NodePool resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting NodePool...") + deleteOp := deleteNodePoolOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllNodePool deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllNodePool(ctx context.Context, project, location, cluster string, filter func(*NodePool) bool) error { + listObj, err := c.ListNodePool(ctx, project, location, cluster) + if err != nil { + return err + } + + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyNodePool(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *NodePool + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyNodePoolHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyNodePoolHelper(c *Client, ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyNodePool...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractNodePoolFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.nodePoolDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToNodePoolDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []nodePoolApiOperation + if create { + ops = append(ops, &createNodePoolOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyNodePoolDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyNodePoolDiff(c *Client, ctx context.Context, desired *NodePool, rawDesired *NodePool, ops []nodePoolApiOperation, opts ...dcl.ApplyOption) (*NodePool, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetNodePool(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createNodePoolOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapNodePool(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeNodePoolNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeNodePoolNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeNodePoolDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractNodePoolFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractNodePoolFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffNodePool(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/node_pool_internal.go.tmpl b/mmv1/third_party/terraform/services/containerazure/node_pool_internal.go.tmpl new file mode 100644 index 000000000000..1c1013d0fdf1 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/node_pool_internal.go.tmpl @@ -0,0 +1,3346 @@ +package containerazure + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *NodePool) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "version"); err != nil { + return err + } + if err := dcl.Required(r, "config"); err != nil { + return err + } + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "autoscaling"); err != nil { + return err + } + if err := dcl.Required(r, "maxPodsConstraint"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); 
err != nil { + return err + } + if err := dcl.RequiredParameter(r.Cluster, "Cluster"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Autoscaling) { + if err := r.Autoscaling.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MaxPodsConstraint) { + if err := r.MaxPodsConstraint.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Management) { + if err := r.Management.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfig) validate() error { + if err := dcl.Required(r, "sshConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfigRootVolume) validate() error { + return nil +} +func (r *NodePoolConfigSshConfig) validate() error { + if err := dcl.Required(r, "authorizedKey"); err != nil { + return err + } + return nil +} +func (r *NodePoolConfigProxyConfig) validate() error { + if err := dcl.Required(r, "resourceGroupId"); err != nil { + return err + } + if err := dcl.Required(r, "secretId"); err != nil { + return err + } + return nil +} +func (r *NodePoolAutoscaling) validate() error { + if err := dcl.Required(r, "minNodeCount"); err != nil { + return err + } + if err := dcl.Required(r, "maxNodeCount"); err != nil { + return err + } + return nil +} +func (r *NodePoolMaxPodsConstraint) validate() error { + if err := dcl.Required(r, "maxPodsPerNode"); err != nil { + return err + } + return nil +} +func (r *NodePoolManagement) validate() error { + return nil +} +func (r *NodePool) basePath() 
string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *NodePool) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *NodePool) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools", nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools?azureNodePoolId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + 
"location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// nodePoolApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type nodePoolApiOperation interface { + do(context.Context, *NodePool, *Client) error +} + +// newUpdateNodePoolUpdateAzureNodePoolRequest creates a request for an +// NodePool resource's UpdateAzureNodePool update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateNodePoolUpdateAzureNodePoolRequest(ctx context.Context, f *NodePool, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + req["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["config"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["autoscaling"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v, err := expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["management"] = v + } + b, err := c.getNodePoolRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, 
err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateNodePoolUpdateAzureNodePoolRequest converts the update into +// the final JSON request body. +func marshalUpdateNodePoolUpdateAzureNodePoolRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateNodePoolUpdateAzureNodePoolOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateNodePoolUpdateAzureNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + _, err := c.GetNodePool(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAzureNodePool") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateNodePoolUpdateAzureNodePoolRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateNodePoolUpdateAzureNodePoolRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listNodePoolRaw(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != NodePoolMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listNodePoolOperation struct { + AzureNodePools []map[string]interface{} `json:"azureNodePools"` + Token string `json:"nextPageToken"` +} + +func (c *Client) 
listNodePool(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]*NodePool, string, error) { + b, err := c.listNodePoolRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listNodePoolOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*NodePool + for _, v := range m.AzureNodePools { + res, err := unmarshalMapNodePool(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.Cluster = r.Cluster + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllNodePool(ctx context.Context, f func(*NodePool) bool, resources []*NodePool) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteNodePool(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteNodePoolOperation struct{} + +func (op *deleteNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + r, err := c.GetNodePool(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "NodePool not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetNodePool checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetNodePool(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createNodePoolOperation struct { + response map[string]interface{} +} + +func (op *createNodePoolOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetNodePool(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getNodePoolRaw(ctx context.Context, r *NodePool) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) nodePoolDiffsForRawDesired(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (initial, desired *NodePool, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *NodePool + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*NodePool); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected NodePool, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetNodePool(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a NodePool resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve NodePool resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that NodePool resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for NodePool: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for NodePool: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractNodePoolFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeNodePoolInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for NodePool: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for NodePool: %v", desired) + + // 2.1: Comparison of initial and desired state. 
+ diffs, err = diffNodePool(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeNodePoolInitialState(rawInitial, rawDesired *NodePool) (*NodePool, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeNodePoolDesiredState(rawDesired, rawInitial *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, nil, opts...) + rawDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, nil, opts...) + rawDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, nil, opts...) + rawDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &NodePool{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Version, rawInitial.Version) { + canonicalDesired.Version = rawInitial.Version + } else { + canonicalDesired.Version = rawDesired.Version + } + canonicalDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, rawInitial.Config, opts...) 
+ if dcl.StringCanonicalize(rawDesired.SubnetId, rawInitial.SubnetId) { + canonicalDesired.SubnetId = rawInitial.SubnetId + } else { + canonicalDesired.SubnetId = rawDesired.SubnetId + } + canonicalDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, rawInitial.Autoscaling, opts...) + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + canonicalDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, rawInitial.MaxPodsConstraint, opts...) + canonicalDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, rawInitial.Management, opts...) + if dcl.StringCanonicalize(rawDesired.AzureAvailabilityZone, rawInitial.AzureAvailabilityZone) { + canonicalDesired.AzureAvailabilityZone = rawInitial.AzureAvailabilityZone + } else { + canonicalDesired.AzureAvailabilityZone = rawDesired.AzureAvailabilityZone + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Cluster, rawInitial.Cluster) { + canonicalDesired.Cluster = rawInitial.Cluster + } else { + canonicalDesired.Cluster = rawDesired.Cluster + } + return canonicalDesired, nil +} + +func canonicalizeNodePoolNewState(c *Client, rawNew, rawDesired *NodePool) (*NodePool, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = 
rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Version) && dcl.IsEmptyValueIndirect(rawDesired.Version) { + rawNew.Version = rawDesired.Version + } else { + if dcl.StringCanonicalize(rawDesired.Version, rawNew.Version) { + rawNew.Version = rawDesired.Version + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Config) && dcl.IsEmptyValueIndirect(rawDesired.Config) { + rawNew.Config = rawDesired.Config + } else { + rawNew.Config = canonicalizeNewNodePoolConfig(c, rawDesired.Config, rawNew.Config) + } + + if dcl.IsEmptyValueIndirect(rawNew.SubnetId) && dcl.IsEmptyValueIndirect(rawDesired.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } else { + if dcl.StringCanonicalize(rawDesired.SubnetId, rawNew.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Autoscaling) && dcl.IsEmptyValueIndirect(rawDesired.Autoscaling) { + rawNew.Autoscaling = rawDesired.Autoscaling + } else { + rawNew.Autoscaling = canonicalizeNewNodePoolAutoscaling(c, rawDesired.Autoscaling, rawNew.Autoscaling) + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if 
dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.MaxPodsConstraint) && dcl.IsEmptyValueIndirect(rawDesired.MaxPodsConstraint) { + rawNew.MaxPodsConstraint = rawDesired.MaxPodsConstraint + } else { + rawNew.MaxPodsConstraint = canonicalizeNewNodePoolMaxPodsConstraint(c, rawDesired.MaxPodsConstraint, rawNew.MaxPodsConstraint) + } + + if dcl.IsEmptyValueIndirect(rawNew.Management) && dcl.IsEmptyValueIndirect(rawDesired.Management) { + rawNew.Management = rawDesired.Management + } else { + rawNew.Management = canonicalizeNewNodePoolManagement(c, rawDesired.Management, rawNew.Management) + } + + if dcl.IsEmptyValueIndirect(rawNew.AzureAvailabilityZone) && dcl.IsEmptyValueIndirect(rawDesired.AzureAvailabilityZone) { + rawNew.AzureAvailabilityZone = rawDesired.AzureAvailabilityZone + } else { + if dcl.StringCanonicalize(rawDesired.AzureAvailabilityZone, rawNew.AzureAvailabilityZone) { + rawNew.AzureAvailabilityZone = rawDesired.AzureAvailabilityZone + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Cluster = rawDesired.Cluster + + return rawNew, nil +} + +func canonicalizeNodePoolConfig(des, initial *NodePoolConfig, opts ...dcl.ApplyOption) *NodePoolConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfig{} + + if dcl.StringCanonicalize(des.VmSize, initial.VmSize) || 
dcl.IsZeroValue(des.VmSize) { + cDes.VmSize = initial.VmSize + } else { + cDes.VmSize = des.VmSize + } + cDes.RootVolume = canonicalizeNodePoolConfigRootVolume(des.RootVolume, initial.RootVolume, opts...) + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + cDes.SshConfig = canonicalizeNodePoolConfigSshConfig(des.SshConfig, initial.SshConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + if dcl.StringCanonicalize(des.ImageType, initial.ImageType) || dcl.IsZeroValue(des.ImageType) { + cDes.ImageType = initial.ImageType + } else { + cDes.ImageType = des.ImageType + } +{{- end }} + cDes.ProxyConfig = canonicalizeNodePoolConfigProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) + + return cDes +} + +func canonicalizeNodePoolConfigSlice(des, initial []NodePoolConfig, opts ...dcl.ApplyOption) []NodePoolConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfig(c *Client, des, nw *NodePoolConfig) *NodePoolConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.VmSize, nw.VmSize) { + nw.VmSize = des.VmSize + } + nw.RootVolume = canonicalizeNewNodePoolConfigRootVolume(c, des.RootVolume, nw.RootVolume) + nw.SshConfig = canonicalizeNewNodePoolConfigSshConfig(c, des.SshConfig, nw.SshConfig) +{{- if ne $.TargetVersionName "ga" }} + if dcl.StringCanonicalize(des.ImageType, nw.ImageType) { + nw.ImageType = des.ImageType + } +{{- end }} + nw.ProxyConfig = canonicalizeNewNodePoolConfigProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) + + return nw +} + +func canonicalizeNewNodePoolConfigSet(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSlice(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigRootVolume(des, initial *NodePoolConfigRootVolume, opts ...dcl.ApplyOption) *NodePoolConfigRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + + return cDes +} + +func canonicalizeNodePoolConfigRootVolumeSlice(des, initial []NodePoolConfigRootVolume, opts ...dcl.ApplyOption) []NodePoolConfigRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigRootVolume(c *Client, des, nw *NodePoolConfigRootVolume) *NodePoolConfigRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigRootVolume while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolConfigRootVolumeSet(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigRootVolumeSlice(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigSshConfig(des, initial *NodePoolConfigSshConfig, opts ...dcl.ApplyOption) *NodePoolConfigSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigSshConfig{} + + if dcl.StringCanonicalize(des.AuthorizedKey, initial.AuthorizedKey) || dcl.IsZeroValue(des.AuthorizedKey) { + cDes.AuthorizedKey = initial.AuthorizedKey + } else { + cDes.AuthorizedKey = des.AuthorizedKey + } + + return cDes +} + +func canonicalizeNodePoolConfigSshConfigSlice(des, initial []NodePoolConfigSshConfig, opts ...dcl.ApplyOption) []NodePoolConfigSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigSshConfig(c *Client, des, nw *NodePoolConfigSshConfig) *NodePoolConfigSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigSshConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AuthorizedKey, nw.AuthorizedKey) { + nw.AuthorizedKey = des.AuthorizedKey + } + + return nw +} + +func canonicalizeNewNodePoolConfigSshConfigSet(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSshConfigSlice(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigProxyConfig(des, initial *NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) *NodePoolConfigProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigProxyConfig{} + + if dcl.StringCanonicalize(des.ResourceGroupId, initial.ResourceGroupId) || dcl.IsZeroValue(des.ResourceGroupId) { + cDes.ResourceGroupId = initial.ResourceGroupId + } else { + cDes.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, initial.SecretId) || dcl.IsZeroValue(des.SecretId) { + cDes.SecretId = initial.SecretId + } else { + cDes.SecretId = des.SecretId + } + + return cDes +} + +func canonicalizeNodePoolConfigProxyConfigSlice(des, initial []NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) []NodePoolConfigProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigProxyConfig(c *Client, des, nw *NodePoolConfigProxyConfig) *NodePoolConfigProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigProxyConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ResourceGroupId, nw.ResourceGroupId) { + nw.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, nw.SecretId) { + nw.SecretId = des.SecretId + } + + return nw +} + +func canonicalizeNewNodePoolConfigProxyConfigSet(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigProxyConfigSlice(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolAutoscaling(des, initial *NodePoolAutoscaling, opts ...dcl.ApplyOption) *NodePoolAutoscaling { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolAutoscaling{} + + if dcl.IsZeroValue(des.MinNodeCount) || (dcl.IsEmptyValueIndirect(des.MinNodeCount) && dcl.IsEmptyValueIndirect(initial.MinNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MinNodeCount = initial.MinNodeCount + } else { + cDes.MinNodeCount = des.MinNodeCount + } + if dcl.IsZeroValue(des.MaxNodeCount) || (dcl.IsEmptyValueIndirect(des.MaxNodeCount) && dcl.IsEmptyValueIndirect(initial.MaxNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxNodeCount = initial.MaxNodeCount + } else { + cDes.MaxNodeCount = des.MaxNodeCount + } + + return cDes +} + +func canonicalizeNodePoolAutoscalingSlice(des, initial []NodePoolAutoscaling, opts ...dcl.ApplyOption) []NodePoolAutoscaling { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolAutoscaling, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolAutoscaling, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolAutoscaling(c *Client, des, nw *NodePoolAutoscaling) *NodePoolAutoscaling { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolAutoscaling while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolAutoscalingSet(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolAutoscaling + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolAutoscalingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolAutoscalingSlice(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolAutoscaling + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolMaxPodsConstraint(des, initial *NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) *NodePoolMaxPodsConstraint { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolMaxPodsConstraint{} + + if dcl.IsZeroValue(des.MaxPodsPerNode) || (dcl.IsEmptyValueIndirect(des.MaxPodsPerNode) && dcl.IsEmptyValueIndirect(initial.MaxPodsPerNode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxPodsPerNode = initial.MaxPodsPerNode + } else { + cDes.MaxPodsPerNode = des.MaxPodsPerNode + } + + return cDes +} + +func canonicalizeNodePoolMaxPodsConstraintSlice(des, initial []NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) []NodePoolMaxPodsConstraint { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolMaxPodsConstraint(c *Client, des, nw *NodePoolMaxPodsConstraint) *NodePoolMaxPodsConstraint { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolMaxPodsConstraint while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolMaxPodsConstraintSet(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolMaxPodsConstraint + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolMaxPodsConstraintNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolMaxPodsConstraintSlice(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolMaxPodsConstraint + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolManagement(des, initial *NodePoolManagement, opts ...dcl.ApplyOption) *NodePoolManagement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolManagement{} + + if dcl.BoolCanonicalize(des.AutoRepair, initial.AutoRepair) || dcl.IsZeroValue(des.AutoRepair) { + cDes.AutoRepair = initial.AutoRepair + } else { + cDes.AutoRepair = des.AutoRepair + } + + return cDes +} + +func canonicalizeNodePoolManagementSlice(des, initial []NodePoolManagement, opts ...dcl.ApplyOption) []NodePoolManagement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolManagement, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolManagement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolManagement, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolManagement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolManagement(c *Client, des, nw *NodePoolManagement) *NodePoolManagement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolManagement while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AutoRepair, nw.AutoRepair) { + nw.AutoRepair = des.AutoRepair + } + + return nw +} + +func canonicalizeNewNodePoolManagementSet(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolManagement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolManagementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolManagementSlice(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolManagement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. 
+// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffNodePool(c *Client, desired, actual *NodePool, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigNewStyle, EmptyObject: EmptyNodePoolConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Autoscaling, actual.Autoscaling, dcl.DiffInfo{ObjectFunction: compareNodePoolAutoscalingNewStyle, EmptyObject: EmptyNodePoolAutoscaling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Autoscaling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxPodsConstraint, actual.MaxPodsConstraint, dcl.DiffInfo{ObjectFunction: compareNodePoolMaxPodsConstraintNewStyle, EmptyObject: EmptyNodePoolMaxPodsConstraint, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsConstraint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{ObjectFunction: compareNodePoolManagementNewStyle, EmptyObject: EmptyNodePoolManagement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.AzureAvailabilityZone, actual.AzureAvailabilityZone, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AzureAvailabilityZone")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareNodePoolConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig or *NodePoolConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.VmSize, actual.VmSize, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VmSize")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolConfigRootVolumeNewStyle, EmptyObject: EmptyNodePoolConfigRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigSshConfigNewStyle, EmptyObject: EmptyNodePoolConfigSshConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ImageType, actual.ImageType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageType")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigProxyConfigNewStyle, EmptyObject: EmptyNodePoolConfigProxyConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigRootVolume) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume or *NodePoolConfigRootVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigRootVolume) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigSshConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig or *NodePoolConfigSshConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigSshConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AuthorizedKey, actual.AuthorizedKey, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("AuthorizedKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigProxyConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig or *NodePoolConfigProxyConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigProxyConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ResourceGroupId, actual.ResourceGroupId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceGroupId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecretId, actual.SecretId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecretId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolAutoscalingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolAutoscaling) + if !ok { + desiredNotPointer, ok := d.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling or *NodePoolAutoscaling", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolAutoscaling) + if !ok { + actualNotPointer, ok := a.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MinNodeCount, actual.MinNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("MinNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxNodeCount, actual.MaxNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("MaxNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolMaxPodsConstraintNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolMaxPodsConstraint) + if !ok { + desiredNotPointer, ok := d.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint or *NodePoolMaxPodsConstraint", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolMaxPodsConstraint) + if !ok { + actualNotPointer, ok := a.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MaxPodsPerNode, actual.MaxPodsPerNode, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsPerNode")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolManagementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolManagement) + if !ok { + desiredNotPointer, ok := d.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement or *NodePoolManagement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolManagement) + if !ok { + actualNotPointer, ok := a.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AutoRepair, actual.AutoRepair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("AutoRepair")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. 
For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *NodePool) urlNormalized() *NodePool { + normalized := dcl.Copy(*r).(NodePool) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Version = dcl.SelfLinkToName(r.Version) + normalized.SubnetId = dcl.SelfLinkToName(r.SubnetId) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.AzureAvailabilityZone = dcl.SelfLinkToName(r.AzureAvailabilityZone) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Cluster = dcl.SelfLinkToName(r.Cluster) + return &normalized +} + +func (r *NodePool) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAzureNodePool" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the NodePool resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *NodePool) marshal(c *Client) ([]byte, error) { + m, err := expandNodePool(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling NodePool: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalNodePool decodes JSON responses into the NodePool resource schema. 
+func unmarshalNodePool(b []byte, c *Client, res *NodePool) (*NodePool, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapNodePool(m, c, res) +} + +func unmarshalMapNodePool(m map[string]interface{}, c *Client, res *NodePool) (*NodePool, error) { + + flattened := flattenNodePool(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandNodePool expands NodePool into a JSON request object. +func expandNodePool(c *Client, f *NodePool) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/azureClusters/%s/azureNodePools/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Cluster), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Version; dcl.ValueShouldBeSent(v) { + m["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["config"] = v + } + if v := f.SubnetId; dcl.ValueShouldBeSent(v) { + m["subnetId"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscaling"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := expandNodePoolMaxPodsConstraint(c, f.MaxPodsConstraint, res); err != nil { + return nil, fmt.Errorf("error expanding MaxPodsConstraint into maxPodsConstraint: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["maxPodsConstraint"] = v + } + if v, err := 
expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + if v := f.AzureAvailabilityZone; dcl.ValueShouldBeSent(v) { + m["azureAvailabilityZone"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Cluster into cluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + + return m, nil +} + +// flattenNodePool flattens NodePool from a JSON request object into the +// NodePool type. +func flattenNodePool(c *Client, i interface{}, res *NodePool) *NodePool { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &NodePool{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Version = dcl.FlattenString(m["version"]) + resultRes.Config = flattenNodePoolConfig(c, m["config"], res) + resultRes.SubnetId = dcl.FlattenString(m["subnetId"]) + resultRes.Autoscaling = flattenNodePoolAutoscaling(c, m["autoscaling"], res) + resultRes.State = flattenNodePoolStateEnum(m["state"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.MaxPodsConstraint = flattenNodePoolMaxPodsConstraint(c, m["maxPodsConstraint"], res) + 
resultRes.Management = flattenNodePoolManagement(c, m["management"], res) + resultRes.AzureAvailabilityZone = dcl.FlattenString(m["azureAvailabilityZone"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Cluster = dcl.FlattenString(m["cluster"]) + + return resultRes +} + +// expandNodePoolConfigMap expands the contents of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfigMap(c *Client, f map[string]NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSlice expands the contents of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfigSlice(c *Client, f []NodePoolConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigMap flattens the contents of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfig{} + } + + items := make(map[string]NodePoolConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSlice flattens the contents of NodePoolConfig from a JSON +// response object. 
+func flattenNodePoolConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfig{} + } + + if len(a) == 0 { + return []NodePoolConfig{} + } + + items := make([]NodePoolConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfig expands an instance of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfig(c *Client, f *NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.VmSize; !dcl.IsEmptyValueIndirect(v) { + m["vmSize"] = v + } + if v, err := expandNodePoolConfigRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) { + m["tags"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + if v, err := expandNodePoolConfigSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } +{{- if ne $.TargetVersionName "ga" }} + if v := f.ImageType; !dcl.IsEmptyValueIndirect(v) { + m["imageType"] = v + } +{{- end }} + if v, err := expandNodePoolConfigProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v + } + + return m, nil +} + +// flattenNodePoolConfig flattens an instance of NodePoolConfig from a JSON +// response object. 
+func flattenNodePoolConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfig + } + r.VmSize = dcl.FlattenString(m["vmSize"]) + r.RootVolume = flattenNodePoolConfigRootVolume(c, m["rootVolume"], res) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + r.SshConfig = flattenNodePoolConfigSshConfig(c, m["sshConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.ImageType = dcl.FlattenString(m["imageType"]) +{{- end }} + r.ProxyConfig = flattenNodePoolConfigProxyConfig(c, m["proxyConfig"], res) + + return r +} + +// expandNodePoolConfigRootVolumeMap expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeMap(c *Client, f map[string]NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigRootVolumeSlice expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeSlice(c *Client, f []NodePoolConfigRootVolume, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigRootVolumeMap flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolumeMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigRootVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigRootVolume{} + } + + items := make(map[string]NodePoolConfigRootVolume) + for k, item := range a { + items[k] = *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigRootVolumeSlice flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. +func flattenNodePoolConfigRootVolumeSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigRootVolume { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return []NodePoolConfigRootVolume{} + } + + items := make([]NodePoolConfigRootVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigRootVolume expands an instance of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolume(c *Client, f *NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + + return m, nil +} + +// flattenNodePoolConfigRootVolume flattens an instance of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolume(c *Client, i interface{}, res *NodePool) *NodePoolConfigRootVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigRootVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigRootVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + + return r +} + +// expandNodePoolConfigSshConfigMap expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigMap(c *Client, f map[string]NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSshConfigSlice expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigSlice(c *Client, f []NodePoolConfigSshConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigSshConfigMap flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigSshConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigSshConfig{} + } + + items := make(map[string]NodePoolConfigSshConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSshConfigSlice flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. +func flattenNodePoolConfigSshConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigSshConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigSshConfig{} + } + + items := make([]NodePoolConfigSshConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigSshConfig expands an instance of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfig(c *Client, f *NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AuthorizedKey; !dcl.IsEmptyValueIndirect(v) { + m["authorizedKey"] = v + } + + return m, nil +} + +// flattenNodePoolConfigSshConfig flattens an instance of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigSshConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigSshConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigSshConfig + } + r.AuthorizedKey = dcl.FlattenString(m["authorizedKey"]) + + return r +} + +// expandNodePoolConfigProxyConfigMap expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigMap(c *Client, f map[string]NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigProxyConfigSlice expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigSlice(c *Client, f []NodePoolConfigProxyConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigProxyConfigMap flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigProxyConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigProxyConfig{} + } + + items := make(map[string]NodePoolConfigProxyConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigProxyConfigSlice flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. +func flattenNodePoolConfigProxyConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigProxyConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigProxyConfig{} + } + + items := make([]NodePoolConfigProxyConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigProxyConfig expands an instance of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfig(c *Client, f *NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceGroupId; !dcl.IsEmptyValueIndirect(v) { + m["resourceGroupId"] = v + } + if v := f.SecretId; !dcl.IsEmptyValueIndirect(v) { + m["secretId"] = v + } + + return m, nil +} + +// flattenNodePoolConfigProxyConfig flattens an instance of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigProxyConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigProxyConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigProxyConfig + } + r.ResourceGroupId = dcl.FlattenString(m["resourceGroupId"]) + r.SecretId = dcl.FlattenString(m["secretId"]) + + return r +} + +// expandNodePoolAutoscalingMap expands the contents of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscalingMap(c *Client, f map[string]NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolAutoscalingSlice expands the contents of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscalingSlice(c *Client, f []NodePoolAutoscaling, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolAutoscalingMap flattens the contents of NodePoolAutoscaling from a JSON +// response object. 
+func flattenNodePoolAutoscalingMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolAutoscaling { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolAutoscaling{} + } + + if len(a) == 0 { + return map[string]NodePoolAutoscaling{} + } + + items := make(map[string]NodePoolAutoscaling) + for k, item := range a { + items[k] = *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolAutoscalingSlice flattens the contents of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscalingSlice(c *Client, i interface{}, res *NodePool) []NodePoolAutoscaling { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolAutoscaling{} + } + + if len(a) == 0 { + return []NodePoolAutoscaling{} + } + + items := make([]NodePoolAutoscaling, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolAutoscaling expands an instance of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscaling(c *Client, f *NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MinNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["minNodeCount"] = v + } + if v := f.MaxNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["maxNodeCount"] = v + } + + return m, nil +} + +// flattenNodePoolAutoscaling flattens an instance of NodePoolAutoscaling from a JSON +// response object. 
+func flattenNodePoolAutoscaling(c *Client, i interface{}, res *NodePool) *NodePoolAutoscaling { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolAutoscaling{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolAutoscaling + } + r.MinNodeCount = dcl.FlattenInteger(m["minNodeCount"]) + r.MaxNodeCount = dcl.FlattenInteger(m["maxNodeCount"]) + + return r +} + +// expandNodePoolMaxPodsConstraintMap expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraintMap(c *Client, f map[string]NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolMaxPodsConstraintSlice expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraintSlice(c *Client, f []NodePoolMaxPodsConstraint, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolMaxPodsConstraintMap flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. 
+func flattenNodePoolMaxPodsConstraintMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolMaxPodsConstraint { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return map[string]NodePoolMaxPodsConstraint{} + } + + items := make(map[string]NodePoolMaxPodsConstraint) + for k, item := range a { + items[k] = *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolMaxPodsConstraintSlice flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraintSlice(c *Client, i interface{}, res *NodePool) []NodePoolMaxPodsConstraint { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return []NodePoolMaxPodsConstraint{} + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolMaxPodsConstraint expands an instance of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraint(c *Client, f *NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MaxPodsPerNode; !dcl.IsEmptyValueIndirect(v) { + m["maxPodsPerNode"] = v + } + + return m, nil +} + +// flattenNodePoolMaxPodsConstraint flattens an instance of NodePoolMaxPodsConstraint from a JSON +// response object. 
+func flattenNodePoolMaxPodsConstraint(c *Client, i interface{}, res *NodePool) *NodePoolMaxPodsConstraint { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolMaxPodsConstraint{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolMaxPodsConstraint + } + r.MaxPodsPerNode = dcl.FlattenInteger(m["maxPodsPerNode"]) + + return r +} + +// expandNodePoolManagementMap expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementMap(c *Client, f map[string]NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolManagementSlice expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementSlice(c *Client, f []NodePoolManagement, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolManagementMap flattens the contents of NodePoolManagement from a JSON +// response object. 
+func flattenNodePoolManagementMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolManagement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolManagement{} + } + + if len(a) == 0 { + return map[string]NodePoolManagement{} + } + + items := make(map[string]NodePoolManagement) + for k, item := range a { + items[k] = *flattenNodePoolManagement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolManagementSlice flattens the contents of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagementSlice(c *Client, i interface{}, res *NodePool) []NodePoolManagement { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolManagement{} + } + + if len(a) == 0 { + return []NodePoolManagement{} + } + + items := make([]NodePoolManagement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolManagement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolManagement expands an instance of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagement(c *Client, f *NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AutoRepair; !dcl.IsEmptyValueIndirect(v) { + m["autoRepair"] = v + } + + return m, nil +} + +// flattenNodePoolManagement flattens an instance of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagement(c *Client, i interface{}, res *NodePool) *NodePoolManagement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolManagement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolManagement + } + r.AutoRepair = dcl.FlattenBool(m["autoRepair"]) + + return r +} + +// flattenNodePoolStateEnumMap flattens the contents of NodePoolStateEnum from a JSON +// response object. 
+func flattenNodePoolStateEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolStateEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolStateEnum{} + } + + items := make(map[string]NodePoolStateEnum) + for k, item := range a { + items[k] = *flattenNodePoolStateEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolStateEnumSlice flattens the contents of NodePoolStateEnum from a JSON +// response object. +func flattenNodePoolStateEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolStateEnum{} + } + + if len(a) == 0 { + return []NodePoolStateEnum{} + } + + items := make([]NodePoolStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolStateEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolStateEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolStateEnum with the same value as that string. +func flattenNodePoolStateEnum(i interface{}) *NodePoolStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolStateEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *NodePool) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalNodePool(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Cluster == nil && ncr.Cluster == nil { + c.Config.Logger.Info("Both Cluster fields null - considering equal.") + } else if nr.Cluster == nil || ncr.Cluster == nil { + c.Config.Logger.Info("Only one Cluster field is null - considering unequal.") + return false + } else if *nr.Cluster != *ncr.Cluster { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type nodePoolDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp nodePoolApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToNodePoolDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]nodePoolDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []nodePoolDiff + // For each operation name, create a nodePoolDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := nodePoolDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToNodePoolApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToNodePoolApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (nodePoolApiOperation, error) { + switch opName { + + case "updateNodePoolUpdateAzureNodePoolOperation": + return &updateNodePoolUpdateAzureNodePoolOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. 
+ vConfig = &NodePoolConfig{} + } + if err := extractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &NodePoolAutoscaling{} + } + if err := extractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. + vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := extractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := extractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + return nil +} +func extractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. 
+ vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. + vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } + return nil +} +func extractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func extractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +func extractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { + return nil +} +func extractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + return nil +} +func extractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func extractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} + +func postReadExtractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &NodePoolConfig{} + } + if err := postReadExtractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &NodePoolAutoscaling{} + } + if err := postReadExtractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. 
+ vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := postReadExtractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := postReadExtractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + return nil +} +func postReadExtractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. 
+ vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } + return nil +} +func postReadExtractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func postReadExtractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +func postReadExtractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { + return nil +} +func postReadExtractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + return nil +} +func postReadExtractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func postReadExtractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/containerazure/provider_dcl_client_creation.go new file mode 100644 index 000000000000..e20d1a835114 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package containerazure + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLContainerAzureClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ContainerAzureBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, 
dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client.go b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client.go new file mode 100644 index 000000000000..9892900b9e40 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client.go @@ -0,0 +1,272 @@ +package containerazure + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAzureClient() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureClientCreate, + Read: resourceContainerAzureClientRead, + Delete: resourceContainerAzureClientDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureClientImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "application_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Azure Active Directory Application ID.", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "tenant_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Azure Active Directory Tenant ID.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "certificate": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The PEM encoded x509 certificate.", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this resource was created.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the client.", + }, + }, + } +} + +func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := 
tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyClient(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Client: %s", err) + } + + log.Printf("[DEBUG] Finished creating Client %q: %#v", d.Id(), res) + + return resourceContainerAzureClientRead(d, meta) +} + +func resourceContainerAzureClientRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetClient(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureClient %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, 
resourceName) + } + + if err = d.Set("application_id", res.ApplicationId); err != nil { + return fmt.Errorf("error setting application_id in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("tenant_id", res.TenantId); err != nil { + return fmt.Errorf("error setting tenant_id in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("certificate", res.Certificate); err != nil { + return fmt.Errorf("error setting certificate in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + + return nil +} + +func resourceContainerAzureClientDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Client %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteClient(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Client: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Client %q", d.Id()) + return nil +} + +func resourceContainerAzureClientImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/azureClients/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go new file mode 100644 index 000000000000..b18b2bfd91b1 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go @@ -0,0 +1,93 @@ +package containerazure_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + 
dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccContainerAzureClient_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAzureClientDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureClient_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_client.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerAzureClient_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_azure_client" "primary" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +`, context) +} + +func testAccCheckContainerAzureClientDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_azure_client" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containerazure.AzureClient{ + ApplicationId: dcl.String(rs.Primary.Attributes["application_id"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + TenantId: dcl.String(rs.Primary.Attributes["tenant_id"]), + Project: 
dcl.StringOrNil(rs.Primary.Attributes["project"]), + Certificate: dcl.StringOrNil(rs.Primary.Attributes["certificate"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + } + + client := containerazure.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetClient(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_client still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml index c1a24a3b786f..733b8c81196f 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_container_azure_client' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AzureClient' diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl new file mode 100644 index 000000000000..4a14dfde99ef --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl @@ -0,0 +1,1433 @@ +package containerazure + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg 
"github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAzureCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureClusterCreate, + Read: resourceContainerAzureClusterRead, + Update: resourceContainerAzureClusterUpdate, + Delete: resourceContainerAzureClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "authorization": { + Type: schema.TypeList, + Required: true, + Description: "Configuration related to the cluster RBAC settings.", + MaxItems: 1, + Elem: ContainerAzureClusterAuthorizationSchema(), + }, + + "azure_region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. 
You can call to list all supported Azure regions within a given Google Cloud region.", + }, + + "control_plane": { + Type: schema.TypeList, + Required: true, + Description: "Configuration related to the cluster control plane.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneSchema(), + }, + + "fleet": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Fleet configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterFleetSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "networking": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Cluster-wide networking configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterNetworkingSchema(), + }, + + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the resource group where the cluster resources are deployed. For example: `/subscriptions/*/resourceGroups/*`", + }, + + "azure_services_authentication": { + Type: schema.TypeList, + Optional: true, + Description: "Azure authentication configuration for management of Azure resources", + MaxItems: 1, + Elem: ContainerAzureClusterAzureServicesAuthenticationSchema(), + ConflictsWith: []string{"client"}, + }, + + "client": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. 
See Resource Names (https:cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names.", + ConflictsWith: []string{"azure_services_authentication"}, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + +{{- if ne $.TargetVersionName "ga" }} + "logging_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Logging configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterLoggingConfigSchema(), + }, + +{{- end }} + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. 
Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was created.", + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The endpoint of the cluster's API server.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the cluster.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was last updated.", + }, + + "workload_identity_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Workload Identity settings.", + Elem: ContainerAzureClusterWorkloadIdentityConfigSchema(), + }, + }, + } +} + +func ContainerAzureClusterAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_users": { + Type: schema.TypeList, + Required: true, + Description: "Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAzureClusterAuthorizationAdminUsersSchema(), + }, + + "admin_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAzureClusterAuthorizationAdminGroupsSchema(), + }, + }, + } +} + +func ContainerAzureClusterAuthorizationAdminUsersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + Description: "The name of the user, e.g. `my-gcp-id@gmail.com`.", + }, + }, + } +} + +func ContainerAzureClusterAuthorizationAdminGroupsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + Description: "The name of the group, e.g. 
`my-group@domain.com`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssh_config": { + Type: schema.TypeList, + Required: true, + Description: "SSH configuration for how to access the underlying control plane machines.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneSshConfigSchema(), + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the subnet where the control plane VMs are deployed. Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig.", + }, + + "database_encryption": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to application-layer secrets encryption.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneDatabaseEncryptionSchema(), + }, + + "main_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. 
When unspecified, it defaults to a 8-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneMainVolumeSchema(), + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneProxyConfigSchema(), + }, + + "replica_placements": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible.", + Elem: ContainerAzureClusterControlPlaneReplicaPlacementsSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneRootVolumeSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A set of tags to apply to all underlying control plane Azure resources.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vm_size": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_key": { + Type: schema.TypeString, + Required: true, + Description: "The SSH public key data for VMs managed by Anthos. 
This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneDatabaseEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneMainVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID the of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", + }, + + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The URL the of the proxy setting secret with its version. 
Secret ids are formatted as `https:.vault.azure.net/secrets//`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneReplicaPlacementsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "azure_availability_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "For a given replica, the Azure availability zone where to provision the control plane VM and the ETCD disk.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "For a given replica, the ARM ID of the subnet where the control plane VM is deployed. Make sure it's a subnet under the virtual network in the cluster configuration.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureClusterFleetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The number of the Fleet host project where this cluster will be registered.", + }, + + "membership": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. 
Membership names are formatted as projects//locations/global/membership/.", + }, + }, + } +} + +func ContainerAzureClusterNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "virtual_network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. 
Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation.", + }, + }, + } +} + +func ContainerAzureClusterAzureServicesAuthenticationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "application_id": { + Type: schema.TypeString, + Required: true, + Description: "The Azure Active Directory Application ID for Authentication configuration.", + }, + + "tenant_id": { + Type: schema.TypeString, + Required: true, + Description: "The Azure Active Directory Tenant ID for Authentication configuration.", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAzureClusterLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration of the logging components.", + MaxItems: 1, + Elem: ContainerAzureClusterLoggingConfigComponentConfigSchema(), + }, + }, + } +} + +func ContainerAzureClusterLoggingConfigComponentConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Components of the logging configuration to be enabled.", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- end }} + }, + }, + } +} + +func ContainerAzureClusterWorkloadIdentityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identity_provider": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + }, + + "issuer_uri": { + Type: schema.TypeString, + Computed: true, + Description: "The OIDC issuer URL for this cluster.", + }, + + "workload_pool": { + Type: schema.TypeString, + Computed: 
true, + Description: "The Workload Identity Pool associated to the cluster.", + }, + }, + } +} + +func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") 
+ return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAzureClusterRead(d, meta) +} + +func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates 
that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetCluster(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureCluster %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("authorization", flattenContainerAzureClusterAuthorization(res.Authorization)); err != nil { + return fmt.Errorf("error setting authorization in state: %s", err) + } + if err = d.Set("azure_region", res.AzureRegion); err != nil { + return fmt.Errorf("error setting azure_region in state: %s", err) + } + if err = d.Set("control_plane", flattenContainerAzureClusterControlPlane(res.ControlPlane)); err != nil { + return fmt.Errorf("error setting control_plane in state: %s", err) + } + if err = d.Set("fleet", flattenContainerAzureClusterFleet(res.Fleet)); err != nil { + return fmt.Errorf("error setting fleet in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("networking", flattenContainerAzureClusterNetworking(res.Networking)); err != nil { + return fmt.Errorf("error setting networking in state: %s", err) + } + if err = d.Set("resource_group_id", res.ResourceGroupId); err != nil { + return fmt.Errorf("error setting resource_group_id in state: %s", err) + } + if err = d.Set("azure_services_authentication", 
flattenContainerAzureClusterAzureServicesAuthentication(res.AzureServicesAuthentication)); err != nil { + return fmt.Errorf("error setting azure_services_authentication in state: %s", err) + } + if err = d.Set("client", res.Client); err != nil { + return fmt.Errorf("error setting client in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) +{{- if ne $.TargetVersionName "ga" }} + } + if err = d.Set("logging_config", flattenContainerAzureClusterLoggingConfig(res.LoggingConfig)); err != nil { + return fmt.Errorf("error setting logging_config in state: %s", err) +{{- end }} + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("annotations", flattenContainerAzureClusterAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("endpoint", res.Endpoint); err != nil { + return fmt.Errorf("error setting endpoint in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } 
+ if err = d.Set("workload_identity_config", flattenContainerAzureClusterWorkloadIdentityConfig(res.WorkloadIdentityConfig)); err != nil { + return fmt.Errorf("error setting workload_identity_config in state: %s", err) + } + + return nil +} +func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + directive := dcl.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); 
err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAzureClusterRead(d, meta) +} + +func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err 
!= nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteCluster(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Cluster %q", d.Id()) + return nil +} + +func resourceContainerAzureClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/azureClusters/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAzureClusterAuthorization(o interface{}) *ClusterAuthorization { + if o == nil { + return EmptyClusterAuthorization + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterAuthorization + } + obj := objArr[0].(map[string]interface{}) + return &ClusterAuthorization{ + AdminUsers: expandContainerAzureClusterAuthorizationAdminUsersArray(obj["admin_users"]), + AdminGroups: 
expandContainerAzureClusterAuthorizationAdminGroupsArray(obj["admin_groups"]), + } +} + +func flattenContainerAzureClusterAuthorization(obj *ClusterAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "admin_users": flattenContainerAzureClusterAuthorizationAdminUsersArray(obj.AdminUsers), + "admin_groups": flattenContainerAzureClusterAuthorizationAdminGroupsArray(obj.AdminGroups), + } + + return []interface{}{transformed} + +} +func expandContainerAzureClusterAuthorizationAdminUsersArray(o interface{}) []ClusterAuthorizationAdminUsers { + if o == nil { + return make([]ClusterAuthorizationAdminUsers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]ClusterAuthorizationAdminUsers, 0) + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterAuthorizationAdminUsers(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterAuthorizationAdminUsers(o interface{}) *ClusterAuthorizationAdminUsers { + if o == nil { + return EmptyClusterAuthorizationAdminUsers + } + + obj := o.(map[string]interface{}) + return &ClusterAuthorizationAdminUsers{ + Username: dcl.String(obj["username"].(string)), + } +} + +func flattenContainerAzureClusterAuthorizationAdminUsersArray(objs []ClusterAuthorizationAdminUsers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterAuthorizationAdminUsers(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAzureClusterAuthorizationAdminUsers(obj *ClusterAuthorizationAdminUsers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "username": obj.Username, + } + + return transformed + +} +func expandContainerAzureClusterAuthorizationAdminGroupsArray(o 
interface{}) []ClusterAuthorizationAdminGroups { + if o == nil { + return make([]ClusterAuthorizationAdminGroups, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]ClusterAuthorizationAdminGroups, 0) + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterAuthorizationAdminGroups(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterAuthorizationAdminGroups(o interface{}) *ClusterAuthorizationAdminGroups { + if o == nil { + return EmptyClusterAuthorizationAdminGroups + } + + obj := o.(map[string]interface{}) + return &ClusterAuthorizationAdminGroups{ + Group: dcl.String(obj["group"].(string)), + } +} + +func flattenContainerAzureClusterAuthorizationAdminGroupsArray(objs []ClusterAuthorizationAdminGroups) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterAuthorizationAdminGroups(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAzureClusterAuthorizationAdminGroups(obj *ClusterAuthorizationAdminGroups) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "group": obj.Group, + } + + return transformed + +} + +func expandContainerAzureClusterControlPlane(o interface{}) *ClusterControlPlane { + if o == nil { + return EmptyClusterControlPlane + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlane + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlane{ + SshConfig: expandContainerAzureClusterControlPlaneSshConfig(obj["ssh_config"]), + SubnetId: dcl.String(obj["subnet_id"].(string)), + Version: dcl.String(obj["version"].(string)), + DatabaseEncryption: expandContainerAzureClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), + MainVolume: 
expandContainerAzureClusterControlPlaneMainVolume(obj["main_volume"]), + ProxyConfig: expandContainerAzureClusterControlPlaneProxyConfig(obj["proxy_config"]), + ReplicaPlacements: expandContainerAzureClusterControlPlaneReplicaPlacementsArray(obj["replica_placements"]), + RootVolume: expandContainerAzureClusterControlPlaneRootVolume(obj["root_volume"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + VmSize: dcl.StringOrNil(obj["vm_size"].(string)), + } +} + +func flattenContainerAzureClusterControlPlane(obj *ClusterControlPlane) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ssh_config": flattenContainerAzureClusterControlPlaneSshConfig(obj.SshConfig), + "subnet_id": obj.SubnetId, + "version": obj.Version, + "database_encryption": flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj.DatabaseEncryption), + "main_volume": flattenContainerAzureClusterControlPlaneMainVolume(obj.MainVolume), + "proxy_config": flattenContainerAzureClusterControlPlaneProxyConfig(obj.ProxyConfig), + "replica_placements": flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(obj.ReplicaPlacements), + "root_volume": flattenContainerAzureClusterControlPlaneRootVolume(obj.RootVolume), + "tags": obj.Tags, + "vm_size": obj.VmSize, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneSshConfig(o interface{}) *ClusterControlPlaneSshConfig { + if o == nil { + return EmptyClusterControlPlaneSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneSshConfig{ + AuthorizedKey: dcl.String(obj["authorized_key"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneSshConfig(obj *ClusterControlPlaneSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + 
"authorized_key": obj.AuthorizedKey, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneDatabaseEncryption(o interface{}) *ClusterControlPlaneDatabaseEncryption { + if o == nil { + return EmptyClusterControlPlaneDatabaseEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneDatabaseEncryption + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneDatabaseEncryption{ + KeyId: dcl.String(obj["key_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj *ClusterControlPlaneDatabaseEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "key_id": obj.KeyId, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneMainVolume(o interface{}) *ClusterControlPlaneMainVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneMainVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureClusterControlPlaneMainVolume(obj *ClusterControlPlaneMainVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneProxyConfig(o interface{}) *ClusterControlPlaneProxyConfig { + if o == nil { + return EmptyClusterControlPlaneProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterControlPlaneProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneProxyConfig{ + ResourceGroupId: dcl.String(obj["resource_group_id"].(string)), + SecretId: dcl.String(obj["secret_id"].(string)), + } +} + 
+func flattenContainerAzureClusterControlPlaneProxyConfig(obj *ClusterControlPlaneProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_group_id": obj.ResourceGroupId, + "secret_id": obj.SecretId, + } + + return []interface{}{transformed} + +} +func expandContainerAzureClusterControlPlaneReplicaPlacementsArray(o interface{}) []ClusterControlPlaneReplicaPlacements { + if o == nil { + return make([]ClusterControlPlaneReplicaPlacements, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]ClusterControlPlaneReplicaPlacements, 0) + } + + items := make([]ClusterControlPlaneReplicaPlacements, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterControlPlaneReplicaPlacements(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterControlPlaneReplicaPlacements(o interface{}) *ClusterControlPlaneReplicaPlacements { + if o == nil { + return EmptyClusterControlPlaneReplicaPlacements + } + + obj := o.(map[string]interface{}) + return &ClusterControlPlaneReplicaPlacements{ + AzureAvailabilityZone: dcl.String(obj["azure_availability_zone"].(string)), + SubnetId: dcl.String(obj["subnet_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(objs []ClusterControlPlaneReplicaPlacements) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterControlPlaneReplicaPlacements(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAzureClusterControlPlaneReplicaPlacements(obj *ClusterControlPlaneReplicaPlacements) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "azure_availability_zone": obj.AzureAvailabilityZone, + "subnet_id": obj.SubnetId, + } + + return transformed + +} + +func 
expandContainerAzureClusterControlPlaneRootVolume(o interface{}) *ClusterControlPlaneRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterControlPlaneRootVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureClusterControlPlaneRootVolume(obj *ClusterControlPlaneRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterFleet(o interface{}) *ClusterFleet { + if o == nil { + return EmptyClusterFleet + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterFleet + } + obj := objArr[0].(map[string]interface{}) + return &ClusterFleet{ + Project: dcl.StringOrNil(obj["project"].(string)), + } +} + +func flattenContainerAzureClusterFleet(obj *ClusterFleet) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "project": obj.Project, + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterNetworking(o interface{}) *ClusterNetworking { + if o == nil { + return EmptyClusterNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterNetworking + } + obj := objArr[0].(map[string]interface{}) + return &ClusterNetworking{ + PodAddressCidrBlocks: dcl.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: dcl.ExpandStringArray(obj["service_address_cidr_blocks"]), + VirtualNetworkId: dcl.String(obj["virtual_network_id"].(string)), + } +} + +func flattenContainerAzureClusterNetworking(obj *ClusterNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, + "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, + "virtual_network_id": obj.VirtualNetworkId, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterAzureServicesAuthentication(o interface{}) *ClusterAzureServicesAuthentication { + if o == nil { + return EmptyClusterAzureServicesAuthentication + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyClusterAzureServicesAuthentication + } + obj := objArr[0].(map[string]interface{}) + return &ClusterAzureServicesAuthentication{ + ApplicationId: dcl.String(obj["application_id"].(string)), + TenantId: dcl.String(obj["tenant_id"].(string)), + } +} + +func flattenContainerAzureClusterAzureServicesAuthentication(obj *ClusterAzureServicesAuthentication) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "application_id": obj.ApplicationId, + "tenant_id": obj.TenantId, + } + + return []interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAzureClusterLoggingConfig(o interface{}) *ClusterLoggingConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterLoggingConfig{ + ComponentConfig: expandContainerAzureClusterLoggingConfigComponentConfig(obj["component_config"]), + } +} + +func flattenContainerAzureClusterLoggingConfig(obj *ClusterLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "component_config": flattenContainerAzureClusterLoggingConfigComponentConfig(obj.ComponentConfig), + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterLoggingConfigComponentConfig(o interface{}) *ClusterLoggingConfigComponentConfig { + if o == nil { + return nil + } + 
objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ClusterLoggingConfigComponentConfig{ + EnableComponents: expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj["enable_components"]), + } +} + +func flattenContainerAzureClusterLoggingConfigComponentConfig(obj *ClusterLoggingConfigComponentConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_components": flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj.EnableComponents), + } + + return []interface{}{transformed} + +} + +{{- end }} +func flattenContainerAzureClusterWorkloadIdentityConfig(obj *ClusterWorkloadIdentityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "identity_provider": obj.IdentityProvider, + "issuer_uri": obj.IssuerUri, + "workload_pool": obj.WorkloadPool, + } + + return []interface{}{transformed} + +} + +func flattenContainerAzureClusterAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} +{{- if ne $.TargetVersionName "ga" }} + +func flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj []ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []ClusterLoggingConfigComponentConfigEnableComponentsEnum { + objs := o.([]interface{}) + items := 
make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) + for _, item := range objs { + i := ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl new file mode 100644 index 000000000000..9cf452ced8b9 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl @@ -0,0 +1,667 @@ +package containerazure_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccContainerAzureCluster_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAzureClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerAzureCluster_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAzureCluster_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccContainerAzureCluster_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAzureClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureCluster_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAzureCluster_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +func TestAccContainerAzureCluster_BetaBasicEnumHandWritten(t *testing.T) { + 
t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAzureClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureCluster_BetaBasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAzureCluster_BetaBasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAzureCluster_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + admin_groups { + group = "group@domain.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureCluster_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource 
"google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + admin_groups { + group = "group@domain.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + database_encryption { + key_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster/providers/Microsoft.KeyVault/vaults/%{byo_prefix}-dev-keyvault/keys/%{byo_prefix}-dev-key" + } + + main_volume { + size_gib = 8 + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + replica_placements { + azure_availability_zone = "1" + subnet_id = 
"/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + + annotations = { + annotation-one = "value-one" + } + + description = "An updated sample azure cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { +{{- if ne $.TargetVersionName "ga" }} + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return 
acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + database_encryption { + key_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster/providers/Microsoft.KeyVault/vaults/%{byo_prefix}-dev-keyvault/keys/%{byo_prefix}-dev-key" + } + + main_volume { + size_gib = 8 + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + replica_placements 
{ + azure_availability_zone = "1" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + + annotations = { + annotation-one = "value-one" + } + + description = "An updated sample azure cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return 
acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + database_encryption { + key_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster/providers/Microsoft.KeyVault/vaults/%{byo_prefix}-dev-keyvault/keys/%{byo_prefix}-dev-key" + } + + main_volume { + size_gib = 8 + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + replica_placements 
{
      azure_availability_zone = "1"
      subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default"
    }

    root_volume {
      size_gib = 32
    }

    tags = {
      owner = "mmv2"
    }

    vm_size = "Standard_DS2_v2"
  }

  fleet {
    project = "%{project_number}"
  }

  location = "us-west1"
  name = "tf-test-name%{random_suffix}"

  networking {
    pod_address_cidr_blocks = ["10.200.0.0/16"]
    service_address_cidr_blocks = ["10.32.0.0/24"]
    virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet"
  }

  resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster"

  annotations = {
    annotation-one = "value-one"
  }

  description = "An updated sample azure cluster"
  project = "%{project_name}"

  logging_config {
    component_config {
      enable_components = ["system_components", "workloads"]
    }
  }

}

resource "google_container_azure_client" "basic" {
  provider = google-beta
{{- end }}
  application_id = "%{azure_app}"
  location = "us-west1"
  name = "tf-test-client-name%{random_suffix}"
  tenant_id = "%{azure_tenant}"
  project = "%{project_name}"
}



`, context)
}

// testAccCheckContainerAzureClusterDestroyProducer returns a CheckDestroy function
// that asserts every google_container_azure_cluster in the final state has actually
// been deleted, by issuing a GetCluster read and expecting it to fail.
func testAccCheckContainerAzureClusterDestroyProducer(t *testing.T) func(s *terraform.State) error {
	return func(s *terraform.State) error {
		for name, rs := range s.RootModule().Resources {
			// BUG FIX: rs.Type holds the bare terraform resource type
			// ("google_container_azure_cluster"). The previous comparison against
			// "rs.google_container_azure_cluster" could never match, so the loop
			// skipped every resource and the destroy check was a silent no-op.
			if rs.Type != "google_container_azure_cluster" {
				continue
			}
			// Data sources are read-only; nothing to destroy.
			if strings.HasPrefix(name, "data.") {
				continue
			}

			config := acctest.GoogleProviderConfig(t)

			billingProject := ""
			if config.BillingProject != "" {
				billingProject = config.BillingProject
			}

			// Rebuild the DCL cluster identity from the recorded state attributes
			// so GetCluster can address the (hopefully gone) resource.
			obj := &containerazure.Cluster{
				AzureRegion:     dcl.String(rs.Primary.Attributes["azure_region"]),
				Location:        dcl.String(rs.Primary.Attributes["location"]),
				Name:            dcl.String(rs.Primary.Attributes["name"]),
				ResourceGroupId: dcl.String(rs.Primary.Attributes["resource_group_id"]),
				Client:          dcl.String(rs.Primary.Attributes["client"]),
				Description:     dcl.String(rs.Primary.Attributes["description"]),
				Project:         dcl.StringOrNil(rs.Primary.Attributes["project"]),
				CreateTime:      dcl.StringOrNil(rs.Primary.Attributes["create_time"]),
				Endpoint:        dcl.StringOrNil(rs.Primary.Attributes["endpoint"]),
				Etag:            dcl.StringOrNil(rs.Primary.Attributes["etag"]),
				Reconciling:     dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"),
				State:           containerazure.ClusterStateEnumRef(rs.Primary.Attributes["state"]),
				Uid:             dcl.StringOrNil(rs.Primary.Attributes["uid"]),
				UpdateTime:      dcl.StringOrNil(rs.Primary.Attributes["update_time"]),
			}

			// A successful read means the cluster still exists — fail the check.
			client := containerazure.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0)
			_, err := client.GetCluster(context.Background(), obj)
			if err == nil {
				return fmt.Errorf("google_container_azure_cluster still exists %v", obj)
			}
		}
		return nil
	}
}
diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl
index 0238e7b209e7..54598e64ffe5 100644
--- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl
+++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl
@@ -1,5 +1,5 @@
 resource: 'google_container_azure_cluster'
-generation_type: 'dcl'
+generation_type: 'handwritten'
 api_service_name: 'gkemulticloud.googleapis.com'
 api_version: 'v1'
 api_resource_type_kind: 'AzureCluster'
diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl
new file mode 100644
index 000000000000..1bb03cce5412
--- /dev/null
+++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl @@ -0,0 +1,836 @@ +package containerazure + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAzureNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureNodePoolCreate, + Read: resourceContainerAzureNodePoolRead, + Update: resourceContainerAzureNodePoolUpdate, + Delete: resourceContainerAzureNodePoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureNodePoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "autoscaling": { + Type: schema.TypeList, + Required: true, + Description: "Autoscaler configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolAutoscalingSchema(), + }, + + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The azureCluster for the resource", + }, + + "config": { + Type: schema.TypeList, + Required: true, + Description: "The node configuration of the node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + 
"max_pods_constraint": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolMaxPodsConstraintSchema(), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the subnet where the node pool VMs run. Make sure it's a subnet under the virtual network in the cluster configuration.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool.", + }, + + "azure_availability_zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Azure availability zone of the nodes in this nodepool. When unspecified, it defaults to `1`.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "management": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "The Management configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolManagementSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. 
Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently pending changes to the node pool.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the node pool.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was last updated.", + }, + }, + } +} + +func ContainerAzureNodePoolAutoscalingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Maximum number of nodes in the node pool. 
Must be >= min_node_count.", + }, + + "min_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssh_config": { + Type: schema.TypeList, + Required: true, + Description: "SSH configuration for how to access the node pool machines.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigSshConfigSchema(), +{{- if ne $.TargetVersionName "ga" }} + }, + + "image_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The OS image type to use on node pool instances.", +{{- end }} + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigProxyConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigRootVolumeSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. 
Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vm_size": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. When unspecified, it defaults to `Standard_DS2_v2`.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_key": { + Type: schema.TypeString, + Required: true, + Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID the of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", + }, + + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The URL the of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. 
See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureNodePoolMaxPodsConstraintSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_pods_per_node": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The maximum number of pods to schedule on a single node.", + }, + }, + } +} + +func ContainerAzureNodePoolManagementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_repair": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Optional. Whether or not the nodes will be automatically repaired.", + }, + }, + } +} + +func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // 
err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAzureNodePoolRead(d, meta) +} + +func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + } + + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetNodePool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureNodePool %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("autoscaling", flattenContainerAzureNodePoolAutoscaling(res.Autoscaling)); err != nil { + return fmt.Errorf("error setting autoscaling in state: %s", err) + } + if err = d.Set("cluster", res.Cluster); err != nil { + return fmt.Errorf("error setting cluster in state: %s", err) + } + if err = d.Set("config", flattenContainerAzureNodePoolConfig(res.Config)); err != nil { + return fmt.Errorf("error setting config in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("max_pods_constraint", flattenContainerAzureNodePoolMaxPodsConstraint(res.MaxPodsConstraint)); err != nil { + return fmt.Errorf("error setting max_pods_constraint in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("subnet_id", res.SubnetId); err != nil { + return fmt.Errorf("error setting subnet_id in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = 
d.Set("azure_availability_zone", res.AzureAvailabilityZone); err != nil { + return fmt.Errorf("error setting azure_availability_zone in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("management", flattenContainerAzureNodePoolManagement(res.Management)); err != nil { + return fmt.Errorf("error setting management in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("annotations", flattenContainerAzureNodePoolAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: 
dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + } + directive := dcl.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAzureNodePoolRead(d, meta) +} + +func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not 
format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteNodePool(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished deleting NodePool %q", d.Id()) + return nil +} + +func resourceContainerAzureNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/azureClusters/(?P[^/]+)/azureNodePools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAzureNodePoolAutoscaling(o interface{}) *NodePoolAutoscaling { + if o == nil { + return EmptyNodePoolAutoscaling + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolAutoscaling + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolAutoscaling{ + MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))), + MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))), + } +} + +func flattenContainerAzureNodePoolAutoscaling(obj *NodePoolAutoscaling) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_node_count": obj.MaxNodeCount, + "min_node_count": obj.MinNodeCount, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfig(o interface{}) *NodePoolConfig { + if o == nil { + 
return EmptyNodePoolConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfig + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfig{ + SshConfig: expandContainerAzureNodePoolConfigSshConfig(obj["ssh_config"]), +{{- if ne $.TargetVersionName "ga" }} + ImageType: dcl.StringOrNil(obj["image_type"].(string)), +{{- end }} + Labels: tpgresource.CheckStringMap(obj["labels"]), + ProxyConfig: expandContainerAzureNodePoolConfigProxyConfig(obj["proxy_config"]), + RootVolume: expandContainerAzureNodePoolConfigRootVolume(obj["root_volume"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + VmSize: dcl.StringOrNil(obj["vm_size"].(string)), + } +} + +func flattenContainerAzureNodePoolConfig(obj *NodePoolConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ssh_config": flattenContainerAzureNodePoolConfigSshConfig(obj.SshConfig), +{{- if ne $.TargetVersionName "ga" }} + "image_type": obj.ImageType, +{{- end }} + "labels": obj.Labels, + "proxy_config": flattenContainerAzureNodePoolConfigProxyConfig(obj.ProxyConfig), + "root_volume": flattenContainerAzureNodePoolConfigRootVolume(obj.RootVolume), + "tags": obj.Tags, + "vm_size": obj.VmSize, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigSshConfig(o interface{}) *NodePoolConfigSshConfig { + if o == nil { + return EmptyNodePoolConfigSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfigSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigSshConfig{ + AuthorizedKey: dcl.String(obj["authorized_key"].(string)), + } +} + +func flattenContainerAzureNodePoolConfigSshConfig(obj *NodePoolConfigSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "authorized_key": obj.AuthorizedKey, + } + + return 
[]interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigProxyConfig(o interface{}) *NodePoolConfigProxyConfig { + if o == nil { + return EmptyNodePoolConfigProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolConfigProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigProxyConfig{ + ResourceGroupId: dcl.String(obj["resource_group_id"].(string)), + SecretId: dcl.String(obj["secret_id"].(string)), + } +} + +func flattenContainerAzureNodePoolConfigProxyConfig(obj *NodePoolConfigProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_group_id": obj.ResourceGroupId, + "secret_id": obj.SecretId, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigRootVolume(o interface{}) *NodePoolConfigRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolConfigRootVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureNodePoolConfigRootVolume(obj *NodePoolConfigRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolMaxPodsConstraint(o interface{}) *NodePoolMaxPodsConstraint { + if o == nil { + return EmptyNodePoolMaxPodsConstraint + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyNodePoolMaxPodsConstraint + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolMaxPodsConstraint{ + MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), + } +} + +func flattenContainerAzureNodePoolMaxPodsConstraint(obj *NodePoolMaxPodsConstraint) interface{} { + if obj == 
nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_pods_per_node": obj.MaxPodsPerNode, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolManagement(o interface{}) *NodePoolManagement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &NodePoolManagement{ + AutoRepair: dcl.Bool(obj["auto_repair"].(bool)), + } +} + +func flattenContainerAzureNodePoolManagement(obj *NodePoolManagement) interface{} { + if obj == nil { + return nil + } + transformed := make(map[string]interface{}) + + if obj.AutoRepair == nil || obj.Empty() { + transformed["auto_repair"] = false + } else { + transformed["auto_repair"] = obj.AutoRepair + } + + return []interface{}{transformed} +} + +func flattenContainerAzureNodePoolAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl new file mode 100644 index 000000000000..5f46d012f13f --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl @@ -0,0 +1,594 @@ +package containerazure_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + 
"github.com/hashicorp/terraform-provider-google/google/services/containerazure" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccContainerAzureNodePool_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAzureNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureNodePool_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAzureNodePool_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccContainerAzureNodePool_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": 
"mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAzureNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureNodePool_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAzureNodePool_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAzureNodePool_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_one = "label_one" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-one = "value-one" + } + + management { + auto_repair = true + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureNodePool_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + 
username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource 
"google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_two = "label_two" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-two = "value-two" + } + + management { + auto_repair = false +{{- if ne $.TargetVersionName "ga" }} + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureNodePool_BetaBasicHandWritten(context 
map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = 
"/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_one = "label_one" + } + + vm_size = "Standard_DS2_v2" + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = 
"/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + management { + auto_repair = true + } + + annotations = { + annotation-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureNodePool_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" 
+ } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" 
+ } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_two = "label_two" + } + + vm_size = "Standard_DS2_v2" + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + management { + auto_repair = false + } + + annotations = { + annotation-two = "value-two" +{{- end }} + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckContainerAzureNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_container_azure_node_pool" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containerazure.NodePool{ + Cluster: dcl.String(rs.Primary.Attributes["cluster"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + SubnetId: dcl.String(rs.Primary.Attributes["subnet_id"]), + Version: dcl.String(rs.Primary.Attributes["version"]), + AzureAvailabilityZone: dcl.StringOrNil(rs.Primary.Attributes["azure_availability_zone"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containerazure.NodePoolStateEnumRef(rs.Primary.Attributes["state"]), + Uid: 
dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := containerazure.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetNodePool(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_node_pool still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl index fa47f16b6f8c..ef92715fe513 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_container_azure_node_pool' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AzureNodePool' diff --git a/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl b/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl new file mode 100644 index 000000000000..70636673ad5f --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl @@ -0,0 +1,1000 @@ +package dataplex + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Asset struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + Description *string `json:"description"` + State *AssetStateEnum `json:"state"` + ResourceSpec *AssetResourceSpec `json:"resourceSpec"` + 
ResourceStatus *AssetResourceStatus `json:"resourceStatus"` + SecurityStatus *AssetSecurityStatus `json:"securityStatus"` + DiscoverySpec *AssetDiscoverySpec `json:"discoverySpec"` + DiscoveryStatus *AssetDiscoveryStatus `json:"discoveryStatus"` + Project *string `json:"project"` + Location *string `json:"location"` + Lake *string `json:"lake"` + DataplexZone *string `json:"dataplexZone"` +} + +func (r *Asset) String() string { + return dcl.SprintResource(r) +} + +// The enum AssetStateEnum. +type AssetStateEnum string + +// AssetStateEnumRef returns a *AssetStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetStateEnumRef(s string) *AssetStateEnum { + v := AssetStateEnum(s) + return &v +} + +func (v AssetStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "ACTIVE", "CREATING", "DELETING", "ACTION_REQUIRED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetResourceSpecTypeEnum. +type AssetResourceSpecTypeEnum string + +// AssetResourceSpecTypeEnumRef returns a *AssetResourceSpecTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetResourceSpecTypeEnumRef(s string) *AssetResourceSpecTypeEnum { + v := AssetResourceSpecTypeEnum(s) + return &v +} + +func (v AssetResourceSpecTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STORAGE_BUCKET", "BIGQUERY_DATASET"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetResourceSpecTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetResourceSpecReadAccessModeEnum. 
+type AssetResourceSpecReadAccessModeEnum string + +// AssetResourceSpecReadAccessModeEnumRef returns a *AssetResourceSpecReadAccessModeEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetResourceSpecReadAccessModeEnumRef(s string) *AssetResourceSpecReadAccessModeEnum { + v := AssetResourceSpecReadAccessModeEnum(s) + return &v +} + +func (v AssetResourceSpecReadAccessModeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"DIRECT", "MANAGED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetResourceSpecReadAccessModeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetResourceStatusStateEnum. +type AssetResourceStatusStateEnum string + +// AssetResourceStatusStateEnumRef returns a *AssetResourceStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetResourceStatusStateEnumRef(s string) *AssetResourceStatusStateEnum { + v := AssetResourceStatusStateEnum(s) + return &v +} + +func (v AssetResourceStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "READY", "ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetResourceStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetSecurityStatusStateEnum. +type AssetSecurityStatusStateEnum string + +// AssetSecurityStatusStateEnumRef returns a *AssetSecurityStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetSecurityStatusStateEnumRef(s string) *AssetSecurityStatusStateEnum { + v := AssetSecurityStatusStateEnum(s) + return &v +} + +func (v AssetSecurityStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "READY", "APPLYING", "ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetSecurityStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetDiscoveryStatusStateEnum. +type AssetDiscoveryStatusStateEnum string + +// AssetDiscoveryStatusStateEnumRef returns a *AssetDiscoveryStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetDiscoveryStatusStateEnumRef(s string) *AssetDiscoveryStatusStateEnum { + v := AssetDiscoveryStatusStateEnum(s) + return &v +} + +func (v AssetDiscoveryStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "SCHEDULED", "IN_PROGRESS", "PAUSED", "DISABLED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetDiscoveryStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +type AssetResourceSpec struct { + empty bool `json:"-"` + Name *string `json:"name"` + Type *AssetResourceSpecTypeEnum `json:"type"` + ReadAccessMode *AssetResourceSpecReadAccessModeEnum `json:"readAccessMode"` +} + +type jsonAssetResourceSpec AssetResourceSpec + +func (r *AssetResourceSpec) UnmarshalJSON(data []byte) error { + var res jsonAssetResourceSpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetResourceSpec + } else { + + r.Name = res.Name + + r.Type = res.Type + + r.ReadAccessMode = res.ReadAccessMode + + } + return nil +} + +// This object is used to assert a desired state where this AssetResourceSpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetResourceSpec *AssetResourceSpec = &AssetResourceSpec{empty: true} + +func (r *AssetResourceSpec) Empty() bool { + return r.empty +} + +func (r *AssetResourceSpec) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetResourceSpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetResourceStatus struct { + empty bool `json:"-"` + State *AssetResourceStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` +} + +type jsonAssetResourceStatus AssetResourceStatus + +func (r *AssetResourceStatus) UnmarshalJSON(data []byte) error { + var res jsonAssetResourceStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetResourceStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this AssetResourceStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetResourceStatus *AssetResourceStatus = &AssetResourceStatus{empty: true} + +func (r *AssetResourceStatus) Empty() bool { + return r.empty +} + +func (r *AssetResourceStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetResourceStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetSecurityStatus struct { + empty bool `json:"-"` + State *AssetSecurityStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` +} + +type jsonAssetSecurityStatus AssetSecurityStatus + +func (r *AssetSecurityStatus) UnmarshalJSON(data []byte) error { + var res jsonAssetSecurityStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetSecurityStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this AssetSecurityStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetSecurityStatus *AssetSecurityStatus = &AssetSecurityStatus{empty: true} + +func (r *AssetSecurityStatus) Empty() bool { + return r.empty +} + +func (r *AssetSecurityStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetSecurityStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoverySpec struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + IncludePatterns []string `json:"includePatterns"` + ExcludePatterns []string `json:"excludePatterns"` + CsvOptions *AssetDiscoverySpecCsvOptions `json:"csvOptions"` + JsonOptions *AssetDiscoverySpecJsonOptions `json:"jsonOptions"` + Schedule *string `json:"schedule"` +} + +type jsonAssetDiscoverySpec AssetDiscoverySpec + +func (r *AssetDiscoverySpec) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoverySpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoverySpec + } else { + + r.Enabled = res.Enabled + + r.IncludePatterns = res.IncludePatterns + + r.ExcludePatterns = res.ExcludePatterns + + r.CsvOptions = res.CsvOptions + + r.JsonOptions = res.JsonOptions + + r.Schedule = res.Schedule + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoverySpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoverySpec *AssetDiscoverySpec = &AssetDiscoverySpec{empty: true} + +func (r *AssetDiscoverySpec) Empty() bool { + return r.empty +} + +func (r *AssetDiscoverySpec) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoverySpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoverySpecCsvOptions struct { + empty bool `json:"-"` + HeaderRows *int64 `json:"headerRows"` + Delimiter *string `json:"delimiter"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonAssetDiscoverySpecCsvOptions AssetDiscoverySpecCsvOptions + +func (r *AssetDiscoverySpecCsvOptions) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoverySpecCsvOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoverySpecCsvOptions + } else { + + r.HeaderRows = res.HeaderRows + + r.Delimiter = res.Delimiter + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoverySpecCsvOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoverySpecCsvOptions *AssetDiscoverySpecCsvOptions = &AssetDiscoverySpecCsvOptions{empty: true} + +func (r *AssetDiscoverySpecCsvOptions) Empty() bool { + return r.empty +} + +func (r *AssetDiscoverySpecCsvOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoverySpecCsvOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoverySpecJsonOptions struct { + empty bool `json:"-"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonAssetDiscoverySpecJsonOptions AssetDiscoverySpecJsonOptions + +func (r *AssetDiscoverySpecJsonOptions) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoverySpecJsonOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoverySpecJsonOptions + } else { + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoverySpecJsonOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoverySpecJsonOptions *AssetDiscoverySpecJsonOptions = &AssetDiscoverySpecJsonOptions{empty: true} + +func (r *AssetDiscoverySpecJsonOptions) Empty() bool { + return r.empty +} + +func (r *AssetDiscoverySpecJsonOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoverySpecJsonOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoveryStatus struct { + empty bool `json:"-"` + State *AssetDiscoveryStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` + LastRunTime *string `json:"lastRunTime"` + Stats *AssetDiscoveryStatusStats `json:"stats"` + LastRunDuration *string `json:"lastRunDuration"` +} + +type jsonAssetDiscoveryStatus AssetDiscoveryStatus + +func (r *AssetDiscoveryStatus) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoveryStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoveryStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + r.LastRunTime = res.LastRunTime + + r.Stats = res.Stats + + r.LastRunDuration = res.LastRunDuration + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoveryStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoveryStatus *AssetDiscoveryStatus = &AssetDiscoveryStatus{empty: true} + +func (r *AssetDiscoveryStatus) Empty() bool { + return r.empty +} + +func (r *AssetDiscoveryStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoveryStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoveryStatusStats struct { + empty bool `json:"-"` + DataItems *int64 `json:"dataItems"` + DataSize *int64 `json:"dataSize"` + Tables *int64 `json:"tables"` + Filesets *int64 `json:"filesets"` +} + +type jsonAssetDiscoveryStatusStats AssetDiscoveryStatusStats + +func (r *AssetDiscoveryStatusStats) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoveryStatusStats + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoveryStatusStats + } else { + + r.DataItems = res.DataItems + + r.DataSize = res.DataSize + + r.Tables = res.Tables + + r.Filesets = res.Filesets + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoveryStatusStats is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoveryStatusStats *AssetDiscoveryStatusStats = &AssetDiscoveryStatusStats{empty: true} + +func (r *AssetDiscoveryStatusStats) Empty() bool { + return r.empty +} + +func (r *AssetDiscoveryStatusStats) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoveryStatusStats) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Asset) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataplex", + Type: "Asset", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "dataplex", +{{- end }} + } +} + +func (r *Asset) ID() (string, error) { + if err := extractAssetFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "description": dcl.ValueOrEmptyString(nr.Description), + "state": dcl.ValueOrEmptyString(nr.State), + "resource_spec": dcl.ValueOrEmptyString(nr.ResourceSpec), + "resource_status": dcl.ValueOrEmptyString(nr.ResourceStatus), + "security_status": dcl.ValueOrEmptyString(nr.SecurityStatus), + "discovery_spec": dcl.ValueOrEmptyString(nr.DiscoverySpec), + "discovery_status": dcl.ValueOrEmptyString(nr.DiscoveryStatus), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "dataplex_zone": dcl.ValueOrEmptyString(nr.DataplexZone), + } + return 
dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplex_zone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const AssetMaxPage = -1 + +type AssetList struct { + Items []*Asset + + nextToken string + + pageSize int32 + + resource *Asset +} + +func (l *AssetList) HasNext() bool { + return l.nextToken != "" +} + +func (l *AssetList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listAsset(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListAsset(ctx context.Context, project, location, dataplexZone, lake string) (*AssetList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListAssetWithMaxResults(ctx, project, location, dataplexZone, lake, AssetMaxPage) + +} + +func (c *Client) ListAssetWithMaxResults(ctx context.Context, project, location, dataplexZone, lake string, pageSize int32) (*AssetList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &Asset{ + Project: &project, + Location: &location, + DataplexZone: &dataplexZone, + Lake: &lake, + } + items, token, err := c.listAsset(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &AssetList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetAsset(ctx context.Context, r *Asset) (*Asset, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* suppressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractAssetFields(r) + + b, err := c.getAssetRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalAsset(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.DataplexZone = r.DataplexZone + result.Lake = r.Lake + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeAssetNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractAssetFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteAsset(ctx context.Context, r *Asset) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Asset resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Asset...") + deleteOp := 
deleteAssetOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllAsset deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllAsset(ctx context.Context, project, location, dataplexZone, lake string, filter func(*Asset) bool) error { + listObj, err := c.ListAsset(ctx, project, location, dataplexZone, lake) + if err != nil { + return err + } + + err = c.deleteAllAsset(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllAsset(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyAsset(ctx context.Context, rawDesired *Asset, opts ...dcl.ApplyOption) (*Asset, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Asset + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyAssetHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyAssetHelper(c *Client, ctx context.Context, rawDesired *Asset, opts ...dcl.ApplyOption) (*Asset, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyAsset...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractAssetFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.assetDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToAssetDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []assetApiOperation + if create { + ops = append(ops, &createAssetOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyAssetDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyAssetDiff(c *Client, ctx context.Context, desired *Asset, rawDesired *Asset, ops []assetApiOperation, opts ...dcl.ApplyOption) (*Asset, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetAsset(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createAssetOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapAsset(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeAssetNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeAssetNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeAssetDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractAssetFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractAssetFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffAsset(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Asset) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/asset_internal.go b/mmv1/third_party/terraform/services/dataplex/asset_internal.go new file mode 100644 index 000000000000..02db40169243 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/asset_internal.go @@ -0,0 +1,4139 @@ +package dataplex + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Asset) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "resourceSpec"); err != nil { + return err + } + if err := dcl.Required(r, "discoverySpec"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + 
return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Lake, "Lake"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.DataplexZone, "DataplexZone"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.ResourceSpec) { + if err := r.ResourceSpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ResourceStatus) { + if err := r.ResourceStatus.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecurityStatus) { + if err := r.SecurityStatus.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DiscoverySpec) { + if err := r.DiscoverySpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DiscoveryStatus) { + if err := r.DiscoveryStatus.validate(); err != nil { + return err + } + } + return nil +} +func (r *AssetResourceSpec) validate() error { + if err := dcl.Required(r, "type"); err != nil { + return err + } + return nil +} +func (r *AssetResourceStatus) validate() error { + return nil +} +func (r *AssetSecurityStatus) validate() error { + return nil +} +func (r *AssetDiscoverySpec) validate() error { + if err := dcl.Required(r, "enabled"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.CsvOptions) { + if err := r.CsvOptions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.JsonOptions) { + if err := r.JsonOptions.validate(); err != nil { + return err + } + } + return nil +} +func (r *AssetDiscoverySpecCsvOptions) validate() error { + return nil +} +func (r *AssetDiscoverySpecJsonOptions) validate() error { + return nil +} +func (r *AssetDiscoveryStatus) validate() error { + if !dcl.IsEmptyValueIndirect(r.Stats) { + if err := r.Stats.validate(); err != nil { + return err + } + } + return nil +} +func (r *AssetDiscoveryStatusStats) validate() error { + return nil +} +func (r *Asset) basePath() string { + 
params := map[string]interface{}{} + return dcl.Nprintf("https://dataplex.googleapis.com/v1/", params) +} + +func (r *Asset) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Asset) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Asset) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets?assetId={{name}}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Asset) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": 
dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Asset) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Asset) SetPolicyVerb() string { + return "" +} + +func (r *Asset) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Asset) IAMPolicyVersion() int { + return 3 +} + +// assetApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type assetApiOperation interface { + do(context.Context, *Asset, *Client) error +} + +// newUpdateAssetUpdateAssetRequest creates a request for an +// Asset resource's UpdateAsset update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateAssetUpdateAssetRequest(ctx context.Context, f *Asset, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.DataplexZone), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["name"] = v + } + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v, err := expandAssetResourceSpec(c, f.ResourceSpec, res); err != nil { + return nil, fmt.Errorf("error expanding ResourceSpec into resourceSpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["resourceSpec"] = v + } + if v, err := expandAssetResourceStatus(c, f.ResourceStatus, res); err != nil { + return nil, fmt.Errorf("error expanding ResourceStatus into resourceStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["resourceStatus"] = v + } + if v, err := expandAssetSecurityStatus(c, f.SecurityStatus, res); err != nil { + return nil, fmt.Errorf("error expanding SecurityStatus into securityStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["securityStatus"] = v + } + if v, err := expandAssetDiscoverySpec(c, f.DiscoverySpec, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["discoverySpec"] = v + } + if v, err := expandAssetDiscoveryStatus(c, f.DiscoveryStatus, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoveryStatus into discoveryStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + 
req["discoveryStatus"] = v + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", *f.Project, *f.Location, *f.Lake, *f.DataplexZone, *f.Name) + + return req, nil +} + +// marshalUpdateAssetUpdateAssetRequest converts the update into +// the final JSON request body. +func marshalUpdateAssetUpdateAssetRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateAssetUpdateAssetOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateAssetUpdateAssetOperation) do(ctx context.Context, r *Asset, c *Client) error { + _, err := c.GetAsset(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAsset") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateAssetUpdateAssetRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateAssetUpdateAssetRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), 
"GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listAssetRaw(ctx context.Context, r *Asset, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != AssetMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listAssetOperation struct { + Assets []map[string]interface{} `json:"assets"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listAsset(ctx context.Context, r *Asset, pageToken string, pageSize int32) ([]*Asset, string, error) { + b, err := c.listAssetRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listAssetOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Asset + for _, v := range m.Assets { + res, err := unmarshalMapAsset(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.DataplexZone = r.DataplexZone + res.Lake = r.Lake + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllAsset(ctx context.Context, f func(*Asset) bool, resources []*Asset) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteAsset(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteAssetOperation struct{} + +func (op *deleteAssetOperation) do(ctx context.Context, r *Asset, c *Client) error { + r, err := c.GetAsset(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Asset not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetAsset checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetAsset(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createAssetOperation struct { + response map[string]interface{} +} + +func (op *createAssetOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createAssetOperation) do(ctx context.Context, r *Asset, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetAsset(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getAssetRaw(ctx context.Context, r *Asset) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) assetDiffsForRawDesired(ctx context.Context, rawDesired *Asset, opts ...dcl.ApplyOption) (initial, desired *Asset, 
diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Asset + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Asset); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Asset, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetAsset(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Asset resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Asset resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Asset resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeAssetDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Asset: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Asset: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractAssetFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeAssetInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Asset: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeAssetDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Asset: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffAsset(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeAssetInitialState(rawInitial, rawDesired *Asset) (*Asset, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeAssetDesiredState(rawDesired, rawInitial *Asset, opts ...dcl.ApplyOption) (*Asset, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.ResourceSpec = canonicalizeAssetResourceSpec(rawDesired.ResourceSpec, nil, opts...) + rawDesired.ResourceStatus = canonicalizeAssetResourceStatus(rawDesired.ResourceStatus, nil, opts...) + rawDesired.SecurityStatus = canonicalizeAssetSecurityStatus(rawDesired.SecurityStatus, nil, opts...) + rawDesired.DiscoverySpec = canonicalizeAssetDiscoverySpec(rawDesired.DiscoverySpec, nil, opts...) + rawDesired.DiscoveryStatus = canonicalizeAssetDiscoveryStatus(rawDesired.DiscoveryStatus, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &Asset{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + canonicalDesired.ResourceSpec = canonicalizeAssetResourceSpec(rawDesired.ResourceSpec, rawInitial.ResourceSpec, opts...) + canonicalDesired.DiscoverySpec = canonicalizeAssetDiscoverySpec(rawDesired.DiscoverySpec, rawInitial.DiscoverySpec, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Lake, rawInitial.Lake) { + canonicalDesired.Lake = rawInitial.Lake + } else { + canonicalDesired.Lake = rawDesired.Lake + } + if dcl.NameToSelfLink(rawDesired.DataplexZone, rawInitial.DataplexZone) { + canonicalDesired.DataplexZone = rawInitial.DataplexZone + } else { + canonicalDesired.DataplexZone = rawDesired.DataplexZone + } + return canonicalDesired, nil +} + +func canonicalizeAssetNewState(c *Client, rawNew, rawDesired *Asset) (*Asset, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && 
dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceSpec) && dcl.IsEmptyValueIndirect(rawDesired.ResourceSpec) { + rawNew.ResourceSpec = rawDesired.ResourceSpec + } else { + rawNew.ResourceSpec = canonicalizeNewAssetResourceSpec(c, rawDesired.ResourceSpec, rawNew.ResourceSpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceStatus) && dcl.IsEmptyValueIndirect(rawDesired.ResourceStatus) { + rawNew.ResourceStatus = rawDesired.ResourceStatus + } else { + rawNew.ResourceStatus = canonicalizeNewAssetResourceStatus(c, rawDesired.ResourceStatus, rawNew.ResourceStatus) + } + + if dcl.IsEmptyValueIndirect(rawNew.SecurityStatus) && dcl.IsEmptyValueIndirect(rawDesired.SecurityStatus) { + rawNew.SecurityStatus = rawDesired.SecurityStatus + } else { + rawNew.SecurityStatus = canonicalizeNewAssetSecurityStatus(c, rawDesired.SecurityStatus, rawNew.SecurityStatus) + } + + if dcl.IsEmptyValueIndirect(rawNew.DiscoverySpec) && dcl.IsEmptyValueIndirect(rawDesired.DiscoverySpec) { + rawNew.DiscoverySpec = rawDesired.DiscoverySpec + } else { + rawNew.DiscoverySpec = canonicalizeNewAssetDiscoverySpec(c, rawDesired.DiscoverySpec, rawNew.DiscoverySpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.DiscoveryStatus) && dcl.IsEmptyValueIndirect(rawDesired.DiscoveryStatus) { + rawNew.DiscoveryStatus = rawDesired.DiscoveryStatus + } else { + rawNew.DiscoveryStatus = canonicalizeNewAssetDiscoveryStatus(c, rawDesired.DiscoveryStatus, rawNew.DiscoveryStatus) + } + + rawNew.Project 
= rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Lake = rawDesired.Lake + + rawNew.DataplexZone = rawDesired.DataplexZone + + return rawNew, nil +} + +func canonicalizeAssetResourceSpec(des, initial *AssetResourceSpec, opts ...dcl.ApplyOption) *AssetResourceSpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetResourceSpec{} + + if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { + cDes.Name = initial.Name + } else { + cDes.Name = des.Name + } + if dcl.IsZeroValue(des.Type) || (dcl.IsEmptyValueIndirect(des.Type) && dcl.IsEmptyValueIndirect(initial.Type)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Type = initial.Type + } else { + cDes.Type = des.Type + } + if dcl.IsZeroValue(des.ReadAccessMode) || (dcl.IsEmptyValueIndirect(des.ReadAccessMode) && dcl.IsEmptyValueIndirect(initial.ReadAccessMode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ReadAccessMode = initial.ReadAccessMode + } else { + cDes.ReadAccessMode = des.ReadAccessMode + } + + return cDes +} + +func canonicalizeAssetResourceSpecSlice(des, initial []AssetResourceSpec, opts ...dcl.ApplyOption) []AssetResourceSpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetResourceSpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetResourceSpec(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetResourceSpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetResourceSpec(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetResourceSpec(c *Client, des, nw *AssetResourceSpec) *AssetResourceSpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetResourceSpec while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Name, nw.Name) { + nw.Name = des.Name + } + + return nw +} + +func canonicalizeNewAssetResourceSpecSet(c *Client, des, nw []AssetResourceSpec) []AssetResourceSpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetResourceSpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetResourceSpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetResourceSpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetResourceSpecSlice(c *Client, des, nw []AssetResourceSpec) []AssetResourceSpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetResourceSpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetResourceSpec(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetResourceStatus(des, initial *AssetResourceStatus, opts ...dcl.ApplyOption) *AssetResourceStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetResourceStatus{} + + if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.State = initial.State + } else { + cDes.State = des.State + } + if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) { + cDes.Message = initial.Message + } else { + cDes.Message = des.Message + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeAssetResourceStatusSlice(des, initial []AssetResourceStatus, opts ...dcl.ApplyOption) []AssetResourceStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetResourceStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetResourceStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetResourceStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetResourceStatus(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetResourceStatus(c *Client, des, nw *AssetResourceStatus) *AssetResourceStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetResourceStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Message, nw.Message) { + nw.Message = des.Message + } + + return nw +} + +func canonicalizeNewAssetResourceStatusSet(c *Client, des, nw []AssetResourceStatus) []AssetResourceStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetResourceStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetResourceStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetResourceStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetResourceStatusSlice(c *Client, des, nw []AssetResourceStatus) []AssetResourceStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetResourceStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetResourceStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetSecurityStatus(des, initial *AssetSecurityStatus, opts ...dcl.ApplyOption) *AssetSecurityStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetSecurityStatus{} + + if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.State = initial.State + } else { + cDes.State = des.State + } + if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) { + cDes.Message = initial.Message + } else { + cDes.Message = des.Message + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeAssetSecurityStatusSlice(des, initial []AssetSecurityStatus, opts ...dcl.ApplyOption) []AssetSecurityStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetSecurityStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetSecurityStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetSecurityStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetSecurityStatus(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetSecurityStatus(c *Client, des, nw *AssetSecurityStatus) *AssetSecurityStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetSecurityStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Message, nw.Message) { + nw.Message = des.Message + } + + return nw +} + +func canonicalizeNewAssetSecurityStatusSet(c *Client, des, nw []AssetSecurityStatus) []AssetSecurityStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetSecurityStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetSecurityStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetSecurityStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetSecurityStatusSlice(c *Client, des, nw []AssetSecurityStatus) []AssetSecurityStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetSecurityStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetSecurityStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoverySpec(des, initial *AssetDiscoverySpec, opts ...dcl.ApplyOption) *AssetDiscoverySpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoverySpec{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, initial.IncludePatterns) { + cDes.IncludePatterns = initial.IncludePatterns + } else { + cDes.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, initial.ExcludePatterns) { + cDes.ExcludePatterns = initial.ExcludePatterns + } else { + cDes.ExcludePatterns = des.ExcludePatterns + } + cDes.CsvOptions = canonicalizeAssetDiscoverySpecCsvOptions(des.CsvOptions, initial.CsvOptions, opts...) + cDes.JsonOptions = canonicalizeAssetDiscoverySpecJsonOptions(des.JsonOptions, initial.JsonOptions, opts...) + if dcl.StringCanonicalize(des.Schedule, initial.Schedule) || dcl.IsZeroValue(des.Schedule) { + cDes.Schedule = initial.Schedule + } else { + cDes.Schedule = des.Schedule + } + + return cDes +} + +func canonicalizeAssetDiscoverySpecSlice(des, initial []AssetDiscoverySpec, opts ...dcl.ApplyOption) []AssetDiscoverySpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoverySpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoverySpec(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoverySpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoverySpec(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoverySpec(c *Client, des, nw *AssetDiscoverySpec) *AssetDiscoverySpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoverySpec while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, nw.IncludePatterns) { + nw.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, nw.ExcludePatterns) { + nw.ExcludePatterns = des.ExcludePatterns + } + nw.CsvOptions = canonicalizeNewAssetDiscoverySpecCsvOptions(c, des.CsvOptions, nw.CsvOptions) + nw.JsonOptions = canonicalizeNewAssetDiscoverySpecJsonOptions(c, des.JsonOptions, nw.JsonOptions) + if dcl.StringCanonicalize(des.Schedule, nw.Schedule) { + nw.Schedule = des.Schedule + } + + return nw +} + +func canonicalizeNewAssetDiscoverySpecSet(c *Client, des, nw []AssetDiscoverySpec) []AssetDiscoverySpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoverySpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoverySpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoverySpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoverySpecSlice(c *Client, des, nw []AssetDiscoverySpec) []AssetDiscoverySpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoverySpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoverySpec(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoverySpecCsvOptions(des, initial *AssetDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) *AssetDiscoverySpecCsvOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoverySpecCsvOptions{} + + if dcl.IsZeroValue(des.HeaderRows) || (dcl.IsEmptyValueIndirect(des.HeaderRows) && dcl.IsEmptyValueIndirect(initial.HeaderRows)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.HeaderRows = initial.HeaderRows + } else { + cDes.HeaderRows = des.HeaderRows + } + if dcl.StringCanonicalize(des.Delimiter, initial.Delimiter) || dcl.IsZeroValue(des.Delimiter) { + cDes.Delimiter = initial.Delimiter + } else { + cDes.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeAssetDiscoverySpecCsvOptionsSlice(des, initial []AssetDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) []AssetDiscoverySpecCsvOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoverySpecCsvOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoverySpecCsvOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoverySpecCsvOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoverySpecCsvOptions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoverySpecCsvOptions(c *Client, des, nw *AssetDiscoverySpecCsvOptions) *AssetDiscoverySpecCsvOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoverySpecCsvOptions while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Delimiter, nw.Delimiter) { + nw.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewAssetDiscoverySpecCsvOptionsSet(c *Client, des, nw []AssetDiscoverySpecCsvOptions) []AssetDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoverySpecCsvOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoverySpecCsvOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoverySpecCsvOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoverySpecCsvOptionsSlice(c *Client, des, nw []AssetDiscoverySpecCsvOptions) []AssetDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoverySpecCsvOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoverySpecCsvOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoverySpecJsonOptions(des, initial *AssetDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) *AssetDiscoverySpecJsonOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoverySpecJsonOptions{} + + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeAssetDiscoverySpecJsonOptionsSlice(des, initial []AssetDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) []AssetDiscoverySpecJsonOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoverySpecJsonOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoverySpecJsonOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoverySpecJsonOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoverySpecJsonOptions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoverySpecJsonOptions(c *Client, des, nw *AssetDiscoverySpecJsonOptions) *AssetDiscoverySpecJsonOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoverySpecJsonOptions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewAssetDiscoverySpecJsonOptionsSet(c *Client, des, nw []AssetDiscoverySpecJsonOptions) []AssetDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoverySpecJsonOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoverySpecJsonOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoverySpecJsonOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoverySpecJsonOptionsSlice(c *Client, des, nw []AssetDiscoverySpecJsonOptions) []AssetDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoverySpecJsonOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoverySpecJsonOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoveryStatus(des, initial *AssetDiscoveryStatus, opts ...dcl.ApplyOption) *AssetDiscoveryStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoveryStatus{} + + if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.State = initial.State + } else { + cDes.State = des.State + } + if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) { + cDes.Message = initial.Message + } else { + cDes.Message = des.Message + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + if dcl.IsZeroValue(des.LastRunTime) || (dcl.IsEmptyValueIndirect(des.LastRunTime) && dcl.IsEmptyValueIndirect(initial.LastRunTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.LastRunTime = initial.LastRunTime + } else { + cDes.LastRunTime = des.LastRunTime + } + cDes.Stats = canonicalizeAssetDiscoveryStatusStats(des.Stats, initial.Stats, opts...) 
+ if dcl.StringCanonicalize(des.LastRunDuration, initial.LastRunDuration) || dcl.IsZeroValue(des.LastRunDuration) { + cDes.LastRunDuration = initial.LastRunDuration + } else { + cDes.LastRunDuration = des.LastRunDuration + } + + return cDes +} + +func canonicalizeAssetDiscoveryStatusSlice(des, initial []AssetDiscoveryStatus, opts ...dcl.ApplyOption) []AssetDiscoveryStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoveryStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoveryStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoveryStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoveryStatus(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoveryStatus(c *Client, des, nw *AssetDiscoveryStatus) *AssetDiscoveryStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoveryStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Message, nw.Message) { + nw.Message = des.Message + } + nw.Stats = canonicalizeNewAssetDiscoveryStatusStats(c, des.Stats, nw.Stats) + if dcl.StringCanonicalize(des.LastRunDuration, nw.LastRunDuration) { + nw.LastRunDuration = des.LastRunDuration + } + + return nw +} + +func canonicalizeNewAssetDiscoveryStatusSet(c *Client, des, nw []AssetDiscoveryStatus) []AssetDiscoveryStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []AssetDiscoveryStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoveryStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoveryStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoveryStatusSlice(c *Client, des, nw []AssetDiscoveryStatus) []AssetDiscoveryStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoveryStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoveryStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoveryStatusStats(des, initial *AssetDiscoveryStatusStats, opts ...dcl.ApplyOption) *AssetDiscoveryStatusStats { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoveryStatusStats{} + + if dcl.IsZeroValue(des.DataItems) || (dcl.IsEmptyValueIndirect(des.DataItems) && dcl.IsEmptyValueIndirect(initial.DataItems)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DataItems = initial.DataItems + } else { + cDes.DataItems = des.DataItems + } + if dcl.IsZeroValue(des.DataSize) || (dcl.IsEmptyValueIndirect(des.DataSize) && dcl.IsEmptyValueIndirect(initial.DataSize)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.DataSize = initial.DataSize + } else { + cDes.DataSize = des.DataSize + } + if dcl.IsZeroValue(des.Tables) || (dcl.IsEmptyValueIndirect(des.Tables) && dcl.IsEmptyValueIndirect(initial.Tables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tables = initial.Tables + } else { + cDes.Tables = des.Tables + } + if dcl.IsZeroValue(des.Filesets) || (dcl.IsEmptyValueIndirect(des.Filesets) && dcl.IsEmptyValueIndirect(initial.Filesets)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Filesets = initial.Filesets + } else { + cDes.Filesets = des.Filesets + } + + return cDes +} + +func canonicalizeAssetDiscoveryStatusStatsSlice(des, initial []AssetDiscoveryStatusStats, opts ...dcl.ApplyOption) []AssetDiscoveryStatusStats { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoveryStatusStats, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoveryStatusStats(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoveryStatusStats, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoveryStatusStats(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoveryStatusStats(c *Client, des, nw *AssetDiscoveryStatusStats) *AssetDiscoveryStatusStats { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoveryStatusStats while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewAssetDiscoveryStatusStatsSet(c *Client, des, nw []AssetDiscoveryStatusStats) []AssetDiscoveryStatusStats { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoveryStatusStats + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoveryStatusStatsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoveryStatusStats(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoveryStatusStatsSlice(c *Client, des, nw []AssetDiscoveryStatusStats) []AssetDiscoveryStatusStats { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoveryStatusStats + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoveryStatusStats(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
// diffAsset compares the desired and actual state of an Asset field by field
// and returns the set of diffs, each tagged with the operation (in-place
// update vs. recreate) that would reconcile it.
func diffAsset(c *Client, desired, actual *Asset, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	// Fields marked OutputOnly cannot be written; any observed drift on them
	// selects dcl.RequiresRecreate rather than an update operation.
	if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	// Nested objects delegate their field-by-field comparison to the
	// corresponding compare*NewStyle function via ObjectFunction.
	if ds, err := dcl.Diff(desired.ResourceSpec, actual.ResourceSpec, dcl.DiffInfo{ObjectFunction: compareAssetResourceSpecNewStyle, EmptyObject: EmptyAssetResourceSpec, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceSpec")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ResourceStatus, actual.ResourceStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareAssetResourceStatusNewStyle, EmptyObject: EmptyAssetResourceStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceStatus")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SecurityStatus, actual.SecurityStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareAssetSecurityStatusNewStyle, EmptyObject: EmptyAssetSecurityStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecurityStatus")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DiscoverySpec, actual.DiscoverySpec, dcl.DiffInfo{ObjectFunction: compareAssetDiscoverySpecNewStyle, EmptyObject: EmptyAssetDiscoverySpec, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DiscoverySpec")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DiscoveryStatus, actual.DiscoveryStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareAssetDiscoveryStatusNewStyle, EmptyObject: EmptyAssetDiscoveryStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiscoveryStatus")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Lake, actual.Lake, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Lake")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	// Note: the struct field is DataplexZone but its diff path is "Zone",
	// matching the API's field name (serialized as "zone" elsewhere).
	if ds, err := dcl.Diff(desired.DataplexZone, actual.DataplexZone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Zone")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// compareAssetResourceSpecNewStyle diffs two AssetResourceSpec values
// (passed as interface{}, pointer or value) field by field.
func compareAssetResourceSpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetResourceSpec)
	if !ok {
		desiredNotPointer, ok := d.(AssetResourceSpec)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetResourceSpec or *AssetResourceSpec", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetResourceSpec)
	if !ok {
		actualNotPointer, ok := a.(AssetResourceSpec)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetResourceSpec", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	// ServerDefault: the API may fill this in, so an unset desired value is
	// not treated as a diff against a server-populated actual value.
	if ds, err := dcl.Diff(desired.ReadAccessMode, actual.ReadAccessMode, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("ReadAccessMode")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetResourceStatusNewStyle diffs two AssetResourceStatus values
// (passed as interface{}, pointer or value) field by field.
func compareAssetResourceStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetResourceStatus)
	if !ok {
		desiredNotPointer, ok := d.(AssetResourceStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetResourceStatus or *AssetResourceStatus", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetResourceStatus)
	if !ok {
		actualNotPointer, ok := a.(AssetResourceStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetResourceStatus", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetSecurityStatusNewStyle diffs two AssetSecurityStatus values
// (passed as interface{}, pointer or value) field by field.
func compareAssetSecurityStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetSecurityStatus)
	if !ok {
		desiredNotPointer, ok := d.(AssetSecurityStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetSecurityStatus or *AssetSecurityStatus", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetSecurityStatus)
	if !ok {
		actualNotPointer, ok := a.(AssetSecurityStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetSecurityStatus", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetDiscoverySpecNewStyle diffs two AssetDiscoverySpec values
// (passed as interface{}, pointer or value) field by field.
func compareAssetDiscoverySpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetDiscoverySpec)
	if !ok {
		desiredNotPointer, ok := d.(AssetDiscoverySpec)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpec or *AssetDiscoverySpec", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetDiscoverySpec)
	if !ok {
		actualNotPointer, ok := a.(AssetDiscoverySpec)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpec", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.IncludePatterns, actual.IncludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("IncludePatterns")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ExcludePatterns, actual.ExcludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("ExcludePatterns")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CsvOptions, actual.CsvOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareAssetDiscoverySpecCsvOptionsNewStyle, EmptyObject: EmptyAssetDiscoverySpecCsvOptions, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("CsvOptions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.JsonOptions, actual.JsonOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareAssetDiscoverySpecJsonOptionsNewStyle, EmptyObject: EmptyAssetDiscoverySpecJsonOptions, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("JsonOptions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Schedule, actual.Schedule, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Schedule")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetDiscoverySpecCsvOptionsNewStyle diffs two AssetDiscoverySpecCsvOptions
// values (passed as interface{}, pointer or value) field by field.
func compareAssetDiscoverySpecCsvOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetDiscoverySpecCsvOptions)
	if !ok {
		desiredNotPointer, ok := d.(AssetDiscoverySpecCsvOptions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecCsvOptions or *AssetDiscoverySpecCsvOptions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetDiscoverySpecCsvOptions)
	if !ok {
		actualNotPointer, ok := a.(AssetDiscoverySpecCsvOptions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecCsvOptions", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.HeaderRows, actual.HeaderRows, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("HeaderRows")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Delimiter, actual.Delimiter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Delimiter")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetDiscoverySpecJsonOptionsNewStyle diffs two AssetDiscoverySpecJsonOptions
// values (passed as interface{}, pointer or value) field by field.
func compareAssetDiscoverySpecJsonOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetDiscoverySpecJsonOptions)
	if !ok {
		desiredNotPointer, ok := d.(AssetDiscoverySpecJsonOptions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecJsonOptions or *AssetDiscoverySpecJsonOptions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetDiscoverySpecJsonOptions)
	if !ok {
		actualNotPointer, ok := a.(AssetDiscoverySpecJsonOptions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecJsonOptions", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetDiscoveryStatusNewStyle diffs two AssetDiscoveryStatus values
// (passed as interface{}, pointer or value) field by field.
func compareAssetDiscoveryStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetDiscoveryStatus)
	if !ok {
		desiredNotPointer, ok := d.(AssetDiscoveryStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatus or *AssetDiscoveryStatus", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetDiscoveryStatus)
	if !ok {
		actualNotPointer, ok := a.(AssetDiscoveryStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatus", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LastRunTime, actual.LastRunTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("LastRunTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Stats, actual.Stats, dcl.DiffInfo{ObjectFunction: compareAssetDiscoveryStatusStatsNewStyle, EmptyObject: EmptyAssetDiscoveryStatusStats, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Stats")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LastRunDuration, actual.LastRunDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("LastRunDuration")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetDiscoveryStatusStatsNewStyle diffs two AssetDiscoveryStatusStats
// values (passed as interface{}, pointer or value) field by field.
func compareAssetDiscoveryStatusStatsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetDiscoveryStatusStats)
	if !ok {
		desiredNotPointer, ok := d.(AssetDiscoveryStatusStats)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatusStats or *AssetDiscoveryStatusStats", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetDiscoveryStatusStats)
	if !ok {
		actualNotPointer, ok := a.(AssetDiscoveryStatusStats)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatusStats", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DataItems, actual.DataItems, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DataItems")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DataSize, actual.DataSize, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DataSize")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Tables, actual.Tables, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Tables")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Filesets, actual.Filesets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Filesets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *Asset) urlNormalized() *Asset {
	normalized := dcl.Copy(*r).(Asset)
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName)
	normalized.Uid = dcl.SelfLinkToName(r.Uid)
	normalized.Description = dcl.SelfLinkToName(r.Description)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	normalized.Lake = dcl.SelfLinkToName(r.Lake)
	normalized.DataplexZone = dcl.SelfLinkToName(r.DataplexZone)
	return &normalized
}

// updateURL returns the URL for the named update operation. "UpdateAsset" is
// the only update supported by this resource; any other name is an error.
func (r *Asset) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	if updateName == "UpdateAsset" {
		fields := map[string]interface{}{
			"project":      dcl.ValueOrEmptyString(nr.Project),
			"location":     dcl.ValueOrEmptyString(nr.Location),
			"dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone),
			"lake":         dcl.ValueOrEmptyString(nr.Lake),
			"name":         dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, fields), nil

	}

	return "", fmt.Errorf("unknown update name: %s", updateName)
}

// marshal encodes the Asset resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *Asset) marshal(c *Client) ([]byte, error) {
	m, err := expandAsset(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling Asset: %w", err)
	}

	return json.Marshal(m)
}

// unmarshalAsset decodes JSON responses into the Asset resource schema.
func unmarshalAsset(b []byte, c *Client, res *Asset) (*Asset, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapAsset(m, c, res)
}

// unmarshalMapAsset converts an already-decoded JSON map into an Asset,
// treating an empty object as an error.
func unmarshalMapAsset(m map[string]interface{}, c *Client, res *Asset) (*Asset, error) {

	flattened := flattenAsset(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}

// expandAsset expands Asset into a JSON request object.
func expandAsset(c *Client, f *Asset) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.DataplexZone), dcl.SelfLinkToName(f.Name)); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["name"] = v
	}
	if v := f.DisplayName; dcl.ValueShouldBeSent(v) {
		m["displayName"] = v
	}
	if v := f.Labels; dcl.ValueShouldBeSent(v) {
		m["labels"] = v
	}
	if v := f.Description; dcl.ValueShouldBeSent(v) {
		m["description"] = v
	}
	if v, err := expandAssetResourceSpec(c, f.ResourceSpec, res); err != nil {
		return nil, fmt.Errorf("error expanding ResourceSpec into resourceSpec: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["resourceSpec"] = v
	}
	if v, err := expandAssetDiscoverySpec(c, f.DiscoverySpec, res); err != nil {
		return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["discoverySpec"] = v
	}
	// Project/Location/Lake/DataplexZone are URL parameters, not body fields:
	// dcl.EmptyValue() produces an empty value, so the IsEmptyValueIndirect
	// guard below keeps these keys out of the request body.
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Location into location: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["location"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Lake into lake: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["lake"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding DataplexZone into zone: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["zone"] = v
	}

	return m, nil
}

// flattenAsset flattens Asset from a JSON request object into the
// Asset type.
+func flattenAsset(c *Client, i interface{}, res *Asset) *Asset { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Asset{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.State = flattenAssetStateEnum(m["state"]) + resultRes.ResourceSpec = flattenAssetResourceSpec(c, m["resourceSpec"], res) + resultRes.ResourceStatus = flattenAssetResourceStatus(c, m["resourceStatus"], res) + resultRes.SecurityStatus = flattenAssetSecurityStatus(c, m["securityStatus"], res) + resultRes.DiscoverySpec = flattenAssetDiscoverySpec(c, m["discoverySpec"], res) + resultRes.DiscoveryStatus = flattenAssetDiscoveryStatus(c, m["discoveryStatus"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Lake = dcl.FlattenString(m["lake"]) + resultRes.DataplexZone = dcl.FlattenString(m["zone"]) + + return resultRes +} + +// expandAssetResourceSpecMap expands the contents of AssetResourceSpec into a JSON +// request object. +func expandAssetResourceSpecMap(c *Client, f map[string]AssetResourceSpec, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetResourceSpec(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetResourceSpecSlice expands the contents of AssetResourceSpec into a JSON +// request object. 
+func expandAssetResourceSpecSlice(c *Client, f []AssetResourceSpec, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetResourceSpec(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetResourceSpecMap flattens the contents of AssetResourceSpec from a JSON +// response object. +func flattenAssetResourceSpecMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceSpec { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetResourceSpec{} + } + + if len(a) == 0 { + return map[string]AssetResourceSpec{} + } + + items := make(map[string]AssetResourceSpec) + for k, item := range a { + items[k] = *flattenAssetResourceSpec(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetResourceSpecSlice flattens the contents of AssetResourceSpec from a JSON +// response object. +func flattenAssetResourceSpecSlice(c *Client, i interface{}, res *Asset) []AssetResourceSpec { + a, ok := i.([]interface{}) + if !ok { + return []AssetResourceSpec{} + } + + if len(a) == 0 { + return []AssetResourceSpec{} + } + + items := make([]AssetResourceSpec, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetResourceSpec(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetResourceSpec expands an instance of AssetResourceSpec into a JSON +// request object. 
+func expandAssetResourceSpec(c *Client, f *AssetResourceSpec, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Name; !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Type; !dcl.IsEmptyValueIndirect(v) { + m["type"] = v + } + if v := f.ReadAccessMode; !dcl.IsEmptyValueIndirect(v) { + m["readAccessMode"] = v + } + + return m, nil +} + +// flattenAssetResourceSpec flattens an instance of AssetResourceSpec from a JSON +// response object. +func flattenAssetResourceSpec(c *Client, i interface{}, res *Asset) *AssetResourceSpec { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetResourceSpec{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetResourceSpec + } + r.Name = dcl.FlattenString(m["name"]) + r.Type = flattenAssetResourceSpecTypeEnum(m["type"]) + r.ReadAccessMode = flattenAssetResourceSpecReadAccessModeEnum(m["readAccessMode"]) + + return r +} + +// expandAssetResourceStatusMap expands the contents of AssetResourceStatus into a JSON +// request object. +func expandAssetResourceStatusMap(c *Client, f map[string]AssetResourceStatus, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetResourceStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetResourceStatusSlice expands the contents of AssetResourceStatus into a JSON +// request object. 
+func expandAssetResourceStatusSlice(c *Client, f []AssetResourceStatus, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetResourceStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetResourceStatusMap flattens the contents of AssetResourceStatus from a JSON +// response object. +func flattenAssetResourceStatusMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetResourceStatus{} + } + + if len(a) == 0 { + return map[string]AssetResourceStatus{} + } + + items := make(map[string]AssetResourceStatus) + for k, item := range a { + items[k] = *flattenAssetResourceStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetResourceStatusSlice flattens the contents of AssetResourceStatus from a JSON +// response object. +func flattenAssetResourceStatusSlice(c *Client, i interface{}, res *Asset) []AssetResourceStatus { + a, ok := i.([]interface{}) + if !ok { + return []AssetResourceStatus{} + } + + if len(a) == 0 { + return []AssetResourceStatus{} + } + + items := make([]AssetResourceStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetResourceStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetResourceStatus expands an instance of AssetResourceStatus into a JSON +// request object. 
+func expandAssetResourceStatus(c *Client, f *AssetResourceStatus, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + + return m, nil +} + +// flattenAssetResourceStatus flattens an instance of AssetResourceStatus from a JSON +// response object. +func flattenAssetResourceStatus(c *Client, i interface{}, res *Asset) *AssetResourceStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetResourceStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetResourceStatus + } + r.State = flattenAssetResourceStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + + return r +} + +// expandAssetSecurityStatusMap expands the contents of AssetSecurityStatus into a JSON +// request object. +func expandAssetSecurityStatusMap(c *Client, f map[string]AssetSecurityStatus, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetSecurityStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetSecurityStatusSlice expands the contents of AssetSecurityStatus into a JSON +// request object. 
+func expandAssetSecurityStatusSlice(c *Client, f []AssetSecurityStatus, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetSecurityStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetSecurityStatusMap flattens the contents of AssetSecurityStatus from a JSON +// response object. +func flattenAssetSecurityStatusMap(c *Client, i interface{}, res *Asset) map[string]AssetSecurityStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetSecurityStatus{} + } + + if len(a) == 0 { + return map[string]AssetSecurityStatus{} + } + + items := make(map[string]AssetSecurityStatus) + for k, item := range a { + items[k] = *flattenAssetSecurityStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetSecurityStatusSlice flattens the contents of AssetSecurityStatus from a JSON +// response object. +func flattenAssetSecurityStatusSlice(c *Client, i interface{}, res *Asset) []AssetSecurityStatus { + a, ok := i.([]interface{}) + if !ok { + return []AssetSecurityStatus{} + } + + if len(a) == 0 { + return []AssetSecurityStatus{} + } + + items := make([]AssetSecurityStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetSecurityStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetSecurityStatus expands an instance of AssetSecurityStatus into a JSON +// request object. 
+func expandAssetSecurityStatus(c *Client, f *AssetSecurityStatus, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + + return m, nil +} + +// flattenAssetSecurityStatus flattens an instance of AssetSecurityStatus from a JSON +// response object. +func flattenAssetSecurityStatus(c *Client, i interface{}, res *Asset) *AssetSecurityStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetSecurityStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetSecurityStatus + } + r.State = flattenAssetSecurityStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + + return r +} + +// expandAssetDiscoverySpecMap expands the contents of AssetDiscoverySpec into a JSON +// request object. +func expandAssetDiscoverySpecMap(c *Client, f map[string]AssetDiscoverySpec, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoverySpecSlice expands the contents of AssetDiscoverySpec into a JSON +// request object. 
+func expandAssetDiscoverySpecSlice(c *Client, f []AssetDiscoverySpec, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoverySpecMap flattens the contents of AssetDiscoverySpec from a JSON +// response object. +func flattenAssetDiscoverySpecMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoverySpec { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoverySpec{} + } + + if len(a) == 0 { + return map[string]AssetDiscoverySpec{} + } + + items := make(map[string]AssetDiscoverySpec) + for k, item := range a { + items[k] = *flattenAssetDiscoverySpec(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoverySpecSlice flattens the contents of AssetDiscoverySpec from a JSON +// response object. +func flattenAssetDiscoverySpecSlice(c *Client, i interface{}, res *Asset) []AssetDiscoverySpec { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoverySpec{} + } + + if len(a) == 0 { + return []AssetDiscoverySpec{} + } + + items := make([]AssetDiscoverySpec, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoverySpec(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoverySpec expands an instance of AssetDiscoverySpec into a JSON +// request object. 
+func expandAssetDiscoverySpec(c *Client, f *AssetDiscoverySpec, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.IncludePatterns; v != nil { + m["includePatterns"] = v + } + if v := f.ExcludePatterns; v != nil { + m["excludePatterns"] = v + } + if v, err := expandAssetDiscoverySpecCsvOptions(c, f.CsvOptions, res); err != nil { + return nil, fmt.Errorf("error expanding CsvOptions into csvOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["csvOptions"] = v + } + if v, err := expandAssetDiscoverySpecJsonOptions(c, f.JsonOptions, res); err != nil { + return nil, fmt.Errorf("error expanding JsonOptions into jsonOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["jsonOptions"] = v + } + if v := f.Schedule; !dcl.IsEmptyValueIndirect(v) { + m["schedule"] = v + } + + return m, nil +} + +// flattenAssetDiscoverySpec flattens an instance of AssetDiscoverySpec from a JSON +// response object. +func flattenAssetDiscoverySpec(c *Client, i interface{}, res *Asset) *AssetDiscoverySpec { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoverySpec{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoverySpec + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + r.IncludePatterns = dcl.FlattenStringSlice(m["includePatterns"]) + r.ExcludePatterns = dcl.FlattenStringSlice(m["excludePatterns"]) + r.CsvOptions = flattenAssetDiscoverySpecCsvOptions(c, m["csvOptions"], res) + r.JsonOptions = flattenAssetDiscoverySpecJsonOptions(c, m["jsonOptions"], res) + r.Schedule = dcl.FlattenString(m["schedule"]) + + return r +} + +// expandAssetDiscoverySpecCsvOptionsMap expands the contents of AssetDiscoverySpecCsvOptions into a JSON +// request object. 
+func expandAssetDiscoverySpecCsvOptionsMap(c *Client, f map[string]AssetDiscoverySpecCsvOptions, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoverySpecCsvOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoverySpecCsvOptionsSlice expands the contents of AssetDiscoverySpecCsvOptions into a JSON +// request object. +func expandAssetDiscoverySpecCsvOptionsSlice(c *Client, f []AssetDiscoverySpecCsvOptions, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoverySpecCsvOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoverySpecCsvOptionsMap flattens the contents of AssetDiscoverySpecCsvOptions from a JSON +// response object. +func flattenAssetDiscoverySpecCsvOptionsMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoverySpecCsvOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoverySpecCsvOptions{} + } + + if len(a) == 0 { + return map[string]AssetDiscoverySpecCsvOptions{} + } + + items := make(map[string]AssetDiscoverySpecCsvOptions) + for k, item := range a { + items[k] = *flattenAssetDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoverySpecCsvOptionsSlice flattens the contents of AssetDiscoverySpecCsvOptions from a JSON +// response object. 
+func flattenAssetDiscoverySpecCsvOptionsSlice(c *Client, i interface{}, res *Asset) []AssetDiscoverySpecCsvOptions { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoverySpecCsvOptions{} + } + + if len(a) == 0 { + return []AssetDiscoverySpecCsvOptions{} + } + + items := make([]AssetDiscoverySpecCsvOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoverySpecCsvOptions expands an instance of AssetDiscoverySpecCsvOptions into a JSON +// request object. +func expandAssetDiscoverySpecCsvOptions(c *Client, f *AssetDiscoverySpecCsvOptions, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HeaderRows; !dcl.IsEmptyValueIndirect(v) { + m["headerRows"] = v + } + if v := f.Delimiter; !dcl.IsEmptyValueIndirect(v) { + m["delimiter"] = v + } + if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) { + m["encoding"] = v + } + if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) { + m["disableTypeInference"] = v + } + + return m, nil +} + +// flattenAssetDiscoverySpecCsvOptions flattens an instance of AssetDiscoverySpecCsvOptions from a JSON +// response object. +func flattenAssetDiscoverySpecCsvOptions(c *Client, i interface{}, res *Asset) *AssetDiscoverySpecCsvOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoverySpecCsvOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoverySpecCsvOptions + } + r.HeaderRows = dcl.FlattenInteger(m["headerRows"]) + r.Delimiter = dcl.FlattenString(m["delimiter"]) + r.Encoding = dcl.FlattenString(m["encoding"]) + r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"]) + + return r +} + +// expandAssetDiscoverySpecJsonOptionsMap expands the contents of AssetDiscoverySpecJsonOptions into a JSON +// request object. 
+func expandAssetDiscoverySpecJsonOptionsMap(c *Client, f map[string]AssetDiscoverySpecJsonOptions, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoverySpecJsonOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoverySpecJsonOptionsSlice expands the contents of AssetDiscoverySpecJsonOptions into a JSON +// request object. +func expandAssetDiscoverySpecJsonOptionsSlice(c *Client, f []AssetDiscoverySpecJsonOptions, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoverySpecJsonOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoverySpecJsonOptionsMap flattens the contents of AssetDiscoverySpecJsonOptions from a JSON +// response object. +func flattenAssetDiscoverySpecJsonOptionsMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoverySpecJsonOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoverySpecJsonOptions{} + } + + if len(a) == 0 { + return map[string]AssetDiscoverySpecJsonOptions{} + } + + items := make(map[string]AssetDiscoverySpecJsonOptions) + for k, item := range a { + items[k] = *flattenAssetDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoverySpecJsonOptionsSlice flattens the contents of AssetDiscoverySpecJsonOptions from a JSON +// response object. 
+func flattenAssetDiscoverySpecJsonOptionsSlice(c *Client, i interface{}, res *Asset) []AssetDiscoverySpecJsonOptions { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoverySpecJsonOptions{} + } + + if len(a) == 0 { + return []AssetDiscoverySpecJsonOptions{} + } + + items := make([]AssetDiscoverySpecJsonOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoverySpecJsonOptions expands an instance of AssetDiscoverySpecJsonOptions into a JSON +// request object. +func expandAssetDiscoverySpecJsonOptions(c *Client, f *AssetDiscoverySpecJsonOptions, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) { + m["encoding"] = v + } + if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) { + m["disableTypeInference"] = v + } + + return m, nil +} + +// flattenAssetDiscoverySpecJsonOptions flattens an instance of AssetDiscoverySpecJsonOptions from a JSON +// response object. +func flattenAssetDiscoverySpecJsonOptions(c *Client, i interface{}, res *Asset) *AssetDiscoverySpecJsonOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoverySpecJsonOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoverySpecJsonOptions + } + r.Encoding = dcl.FlattenString(m["encoding"]) + r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"]) + + return r +} + +// expandAssetDiscoveryStatusMap expands the contents of AssetDiscoveryStatus into a JSON +// request object. 
+func expandAssetDiscoveryStatusMap(c *Client, f map[string]AssetDiscoveryStatus, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoveryStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoveryStatusSlice expands the contents of AssetDiscoveryStatus into a JSON +// request object. +func expandAssetDiscoveryStatusSlice(c *Client, f []AssetDiscoveryStatus, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoveryStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoveryStatusMap flattens the contents of AssetDiscoveryStatus from a JSON +// response object. +func flattenAssetDiscoveryStatusMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoveryStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoveryStatus{} + } + + if len(a) == 0 { + return map[string]AssetDiscoveryStatus{} + } + + items := make(map[string]AssetDiscoveryStatus) + for k, item := range a { + items[k] = *flattenAssetDiscoveryStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoveryStatusSlice flattens the contents of AssetDiscoveryStatus from a JSON +// response object. 
+func flattenAssetDiscoveryStatusSlice(c *Client, i interface{}, res *Asset) []AssetDiscoveryStatus { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoveryStatus{} + } + + if len(a) == 0 { + return []AssetDiscoveryStatus{} + } + + items := make([]AssetDiscoveryStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoveryStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoveryStatus expands an instance of AssetDiscoveryStatus into a JSON +// request object. +func expandAssetDiscoveryStatus(c *Client, f *AssetDiscoveryStatus, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + if v := f.LastRunTime; !dcl.IsEmptyValueIndirect(v) { + m["lastRunTime"] = v + } + if v, err := expandAssetDiscoveryStatusStats(c, f.Stats, res); err != nil { + return nil, fmt.Errorf("error expanding Stats into stats: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["stats"] = v + } + if v := f.LastRunDuration; !dcl.IsEmptyValueIndirect(v) { + m["lastRunDuration"] = v + } + + return m, nil +} + +// flattenAssetDiscoveryStatus flattens an instance of AssetDiscoveryStatus from a JSON +// response object. 
+func flattenAssetDiscoveryStatus(c *Client, i interface{}, res *Asset) *AssetDiscoveryStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoveryStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoveryStatus + } + r.State = flattenAssetDiscoveryStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + r.LastRunTime = dcl.FlattenString(m["lastRunTime"]) + r.Stats = flattenAssetDiscoveryStatusStats(c, m["stats"], res) + r.LastRunDuration = dcl.FlattenString(m["lastRunDuration"]) + + return r +} + +// expandAssetDiscoveryStatusStatsMap expands the contents of AssetDiscoveryStatusStats into a JSON +// request object. +func expandAssetDiscoveryStatusStatsMap(c *Client, f map[string]AssetDiscoveryStatusStats, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoveryStatusStats(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoveryStatusStatsSlice expands the contents of AssetDiscoveryStatusStats into a JSON +// request object. +func expandAssetDiscoveryStatusStatsSlice(c *Client, f []AssetDiscoveryStatusStats, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoveryStatusStats(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoveryStatusStatsMap flattens the contents of AssetDiscoveryStatusStats from a JSON +// response object. 
+func flattenAssetDiscoveryStatusStatsMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoveryStatusStats { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoveryStatusStats{} + } + + if len(a) == 0 { + return map[string]AssetDiscoveryStatusStats{} + } + + items := make(map[string]AssetDiscoveryStatusStats) + for k, item := range a { + items[k] = *flattenAssetDiscoveryStatusStats(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoveryStatusStatsSlice flattens the contents of AssetDiscoveryStatusStats from a JSON +// response object. +func flattenAssetDiscoveryStatusStatsSlice(c *Client, i interface{}, res *Asset) []AssetDiscoveryStatusStats { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoveryStatusStats{} + } + + if len(a) == 0 { + return []AssetDiscoveryStatusStats{} + } + + items := make([]AssetDiscoveryStatusStats, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoveryStatusStats(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoveryStatusStats expands an instance of AssetDiscoveryStatusStats into a JSON +// request object. +func expandAssetDiscoveryStatusStats(c *Client, f *AssetDiscoveryStatusStats, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DataItems; !dcl.IsEmptyValueIndirect(v) { + m["dataItems"] = v + } + if v := f.DataSize; !dcl.IsEmptyValueIndirect(v) { + m["dataSize"] = v + } + if v := f.Tables; !dcl.IsEmptyValueIndirect(v) { + m["tables"] = v + } + if v := f.Filesets; !dcl.IsEmptyValueIndirect(v) { + m["filesets"] = v + } + + return m, nil +} + +// flattenAssetDiscoveryStatusStats flattens an instance of AssetDiscoveryStatusStats from a JSON +// response object. 
+func flattenAssetDiscoveryStatusStats(c *Client, i interface{}, res *Asset) *AssetDiscoveryStatusStats { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoveryStatusStats{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoveryStatusStats + } + r.DataItems = dcl.FlattenInteger(m["dataItems"]) + r.DataSize = dcl.FlattenInteger(m["dataSize"]) + r.Tables = dcl.FlattenInteger(m["tables"]) + r.Filesets = dcl.FlattenInteger(m["filesets"]) + + return r +} + +// flattenAssetStateEnumMap flattens the contents of AssetStateEnum from a JSON +// response object. +func flattenAssetStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetStateEnum{} + } + + if len(a) == 0 { + return map[string]AssetStateEnum{} + } + + items := make(map[string]AssetStateEnum) + for k, item := range a { + items[k] = *flattenAssetStateEnum(item.(interface{})) + } + + return items +} + +// flattenAssetStateEnumSlice flattens the contents of AssetStateEnum from a JSON +// response object. +func flattenAssetStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []AssetStateEnum{} + } + + if len(a) == 0 { + return []AssetStateEnum{} + } + + items := make([]AssetStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetStateEnum(item.(interface{}))) + } + + return items +} + +// flattenAssetStateEnum asserts that an interface is a string, and returns a +// pointer to a *AssetStateEnum with the same value as that string. +func flattenAssetStateEnum(i interface{}) *AssetStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return AssetStateEnumRef(s) +} + +// flattenAssetResourceSpecTypeEnumMap flattens the contents of AssetResourceSpecTypeEnum from a JSON +// response object. 
+func flattenAssetResourceSpecTypeEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceSpecTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetResourceSpecTypeEnum{} + } + + if len(a) == 0 { + return map[string]AssetResourceSpecTypeEnum{} + } + + items := make(map[string]AssetResourceSpecTypeEnum) + for k, item := range a { + items[k] = *flattenAssetResourceSpecTypeEnum(item.(interface{})) + } + + return items +} + +// flattenAssetResourceSpecTypeEnumSlice flattens the contents of AssetResourceSpecTypeEnum from a JSON +// response object. +func flattenAssetResourceSpecTypeEnumSlice(c *Client, i interface{}, res *Asset) []AssetResourceSpecTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []AssetResourceSpecTypeEnum{} + } + + if len(a) == 0 { + return []AssetResourceSpecTypeEnum{} + } + + items := make([]AssetResourceSpecTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetResourceSpecTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenAssetResourceSpecTypeEnum asserts that an interface is a string, and returns a +// pointer to a *AssetResourceSpecTypeEnum with the same value as that string. +func flattenAssetResourceSpecTypeEnum(i interface{}) *AssetResourceSpecTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return AssetResourceSpecTypeEnumRef(s) +} + +// flattenAssetResourceSpecReadAccessModeEnumMap flattens the contents of AssetResourceSpecReadAccessModeEnum from a JSON +// response object. 
+func flattenAssetResourceSpecReadAccessModeEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceSpecReadAccessModeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetResourceSpecReadAccessModeEnum{} + } + + if len(a) == 0 { + return map[string]AssetResourceSpecReadAccessModeEnum{} + } + + items := make(map[string]AssetResourceSpecReadAccessModeEnum) + for k, item := range a { + items[k] = *flattenAssetResourceSpecReadAccessModeEnum(item.(interface{})) + } + + return items +} + +// flattenAssetResourceSpecReadAccessModeEnumSlice flattens the contents of AssetResourceSpecReadAccessModeEnum from a JSON +// response object. +func flattenAssetResourceSpecReadAccessModeEnumSlice(c *Client, i interface{}, res *Asset) []AssetResourceSpecReadAccessModeEnum { + a, ok := i.([]interface{}) + if !ok { + return []AssetResourceSpecReadAccessModeEnum{} + } + + if len(a) == 0 { + return []AssetResourceSpecReadAccessModeEnum{} + } + + items := make([]AssetResourceSpecReadAccessModeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetResourceSpecReadAccessModeEnum(item.(interface{}))) + } + + return items +} + +// flattenAssetResourceSpecReadAccessModeEnum asserts that an interface is a string, and returns a +// pointer to a *AssetResourceSpecReadAccessModeEnum with the same value as that string. +func flattenAssetResourceSpecReadAccessModeEnum(i interface{}) *AssetResourceSpecReadAccessModeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return AssetResourceSpecReadAccessModeEnumRef(s) +} + +// flattenAssetResourceStatusStateEnumMap flattens the contents of AssetResourceStatusStateEnum from a JSON +// response object. 
+func flattenAssetResourceStatusStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceStatusStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetResourceStatusStateEnum{} + } + + if len(a) == 0 { + return map[string]AssetResourceStatusStateEnum{} + } + + items := make(map[string]AssetResourceStatusStateEnum) + for k, item := range a { + items[k] = *flattenAssetResourceStatusStateEnum(item.(interface{})) + } + + return items +} + +// flattenAssetResourceStatusStateEnumSlice flattens the contents of AssetResourceStatusStateEnum from a JSON +// response object. +func flattenAssetResourceStatusStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetResourceStatusStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []AssetResourceStatusStateEnum{} + } + + if len(a) == 0 { + return []AssetResourceStatusStateEnum{} + } + + items := make([]AssetResourceStatusStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetResourceStatusStateEnum(item.(interface{}))) + } + + return items +} + +// flattenAssetResourceStatusStateEnum asserts that an interface is a string, and returns a +// pointer to a *AssetResourceStatusStateEnum with the same value as that string. +func flattenAssetResourceStatusStateEnum(i interface{}) *AssetResourceStatusStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return AssetResourceStatusStateEnumRef(s) +} + +// flattenAssetSecurityStatusStateEnumMap flattens the contents of AssetSecurityStatusStateEnum from a JSON +// response object. 
+func flattenAssetSecurityStatusStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetSecurityStatusStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetSecurityStatusStateEnum{} + } + + if len(a) == 0 { + return map[string]AssetSecurityStatusStateEnum{} + } + + items := make(map[string]AssetSecurityStatusStateEnum) + for k, item := range a { + items[k] = *flattenAssetSecurityStatusStateEnum(item.(interface{})) + } + + return items +} + +// flattenAssetSecurityStatusStateEnumSlice flattens the contents of AssetSecurityStatusStateEnum from a JSON +// response object. +func flattenAssetSecurityStatusStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetSecurityStatusStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []AssetSecurityStatusStateEnum{} + } + + if len(a) == 0 { + return []AssetSecurityStatusStateEnum{} + } + + items := make([]AssetSecurityStatusStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetSecurityStatusStateEnum(item.(interface{}))) + } + + return items +} + +// flattenAssetSecurityStatusStateEnum asserts that an interface is a string, and returns a +// pointer to a *AssetSecurityStatusStateEnum with the same value as that string. +func flattenAssetSecurityStatusStateEnum(i interface{}) *AssetSecurityStatusStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return AssetSecurityStatusStateEnumRef(s) +} + +// flattenAssetDiscoveryStatusStateEnumMap flattens the contents of AssetDiscoveryStatusStateEnum from a JSON +// response object. 
+func flattenAssetDiscoveryStatusStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoveryStatusStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoveryStatusStateEnum{} + } + + if len(a) == 0 { + return map[string]AssetDiscoveryStatusStateEnum{} + } + + items := make(map[string]AssetDiscoveryStatusStateEnum) + for k, item := range a { + items[k] = *flattenAssetDiscoveryStatusStateEnum(item.(interface{})) + } + + return items +} + +// flattenAssetDiscoveryStatusStateEnumSlice flattens the contents of AssetDiscoveryStatusStateEnum from a JSON +// response object. +func flattenAssetDiscoveryStatusStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetDiscoveryStatusStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoveryStatusStateEnum{} + } + + if len(a) == 0 { + return []AssetDiscoveryStatusStateEnum{} + } + + items := make([]AssetDiscoveryStatusStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoveryStatusStateEnum(item.(interface{}))) + } + + return items +} + +// flattenAssetDiscoveryStatusStateEnum asserts that an interface is a string, and returns a +// pointer to a *AssetDiscoveryStatusStateEnum with the same value as that string. +func flattenAssetDiscoveryStatusStateEnum(i interface{}) *AssetDiscoveryStatusStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return AssetDiscoveryStatusStateEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Asset) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalAsset(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.DataplexZone == nil && ncr.DataplexZone == nil { + c.Config.Logger.Info("Both DataplexZone fields null - considering equal.") + } else if nr.DataplexZone == nil || ncr.DataplexZone == nil { + c.Config.Logger.Info("Only one DataplexZone field is null - considering unequal.") + return false + } else if *nr.DataplexZone != *ncr.DataplexZone { + return false + } + if nr.Lake == nil && ncr.Lake == nil { + c.Config.Logger.Info("Both Lake fields null - considering equal.") + } else if nr.Lake == nil || ncr.Lake == nil { + c.Config.Logger.Info("Only one Lake field is null - considering unequal.") + return false + } else if *nr.Lake != *ncr.Lake { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } 
+ return true + } +} + +type assetDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp assetApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToAssetDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]assetDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []assetDiff + // For each operation name, create a assetDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := assetDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToAssetApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToAssetApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (assetApiOperation, error) { + switch opName { + + case "updateAssetUpdateAssetOperation": + return &updateAssetUpdateAssetOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractAssetFields(r *Asset) error { + vResourceSpec := r.ResourceSpec + if vResourceSpec == nil { + // note: explicitly not the empty object. 
+ vResourceSpec = &AssetResourceSpec{} + } + if err := extractAssetResourceSpecFields(r, vResourceSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceSpec) { + r.ResourceSpec = vResourceSpec + } + vResourceStatus := r.ResourceStatus + if vResourceStatus == nil { + // note: explicitly not the empty object. + vResourceStatus = &AssetResourceStatus{} + } + if err := extractAssetResourceStatusFields(r, vResourceStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceStatus) { + r.ResourceStatus = vResourceStatus + } + vSecurityStatus := r.SecurityStatus + if vSecurityStatus == nil { + // note: explicitly not the empty object. + vSecurityStatus = &AssetSecurityStatus{} + } + if err := extractAssetSecurityStatusFields(r, vSecurityStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecurityStatus) { + r.SecurityStatus = vSecurityStatus + } + vDiscoverySpec := r.DiscoverySpec + if vDiscoverySpec == nil { + // note: explicitly not the empty object. + vDiscoverySpec = &AssetDiscoverySpec{} + } + if err := extractAssetDiscoverySpecFields(r, vDiscoverySpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoverySpec) { + r.DiscoverySpec = vDiscoverySpec + } + vDiscoveryStatus := r.DiscoveryStatus + if vDiscoveryStatus == nil { + // note: explicitly not the empty object. 
+ vDiscoveryStatus = &AssetDiscoveryStatus{} + } + if err := extractAssetDiscoveryStatusFields(r, vDiscoveryStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoveryStatus) { + r.DiscoveryStatus = vDiscoveryStatus + } + return nil +} +func extractAssetResourceSpecFields(r *Asset, o *AssetResourceSpec) error { + return nil +} +func extractAssetResourceStatusFields(r *Asset, o *AssetResourceStatus) error { + return nil +} +func extractAssetSecurityStatusFields(r *Asset, o *AssetSecurityStatus) error { + return nil +} +func extractAssetDiscoverySpecFields(r *Asset, o *AssetDiscoverySpec) error { + vCsvOptions := o.CsvOptions + if vCsvOptions == nil { + // note: explicitly not the empty object. + vCsvOptions = &AssetDiscoverySpecCsvOptions{} + } + if err := extractAssetDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCsvOptions) { + o.CsvOptions = vCsvOptions + } + vJsonOptions := o.JsonOptions + if vJsonOptions == nil { + // note: explicitly not the empty object. + vJsonOptions = &AssetDiscoverySpecJsonOptions{} + } + if err := extractAssetDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vJsonOptions) { + o.JsonOptions = vJsonOptions + } + return nil +} +func extractAssetDiscoverySpecCsvOptionsFields(r *Asset, o *AssetDiscoverySpecCsvOptions) error { + return nil +} +func extractAssetDiscoverySpecJsonOptionsFields(r *Asset, o *AssetDiscoverySpecJsonOptions) error { + return nil +} +func extractAssetDiscoveryStatusFields(r *Asset, o *AssetDiscoveryStatus) error { + vStats := o.Stats + if vStats == nil { + // note: explicitly not the empty object. 
+ vStats = &AssetDiscoveryStatusStats{} + } + if err := extractAssetDiscoveryStatusStatsFields(r, vStats); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStats) { + o.Stats = vStats + } + return nil +} +func extractAssetDiscoveryStatusStatsFields(r *Asset, o *AssetDiscoveryStatusStats) error { + return nil +} + +func postReadExtractAssetFields(r *Asset) error { + vResourceSpec := r.ResourceSpec + if vResourceSpec == nil { + // note: explicitly not the empty object. + vResourceSpec = &AssetResourceSpec{} + } + if err := postReadExtractAssetResourceSpecFields(r, vResourceSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceSpec) { + r.ResourceSpec = vResourceSpec + } + vResourceStatus := r.ResourceStatus + if vResourceStatus == nil { + // note: explicitly not the empty object. + vResourceStatus = &AssetResourceStatus{} + } + if err := postReadExtractAssetResourceStatusFields(r, vResourceStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceStatus) { + r.ResourceStatus = vResourceStatus + } + vSecurityStatus := r.SecurityStatus + if vSecurityStatus == nil { + // note: explicitly not the empty object. + vSecurityStatus = &AssetSecurityStatus{} + } + if err := postReadExtractAssetSecurityStatusFields(r, vSecurityStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecurityStatus) { + r.SecurityStatus = vSecurityStatus + } + vDiscoverySpec := r.DiscoverySpec + if vDiscoverySpec == nil { + // note: explicitly not the empty object. + vDiscoverySpec = &AssetDiscoverySpec{} + } + if err := postReadExtractAssetDiscoverySpecFields(r, vDiscoverySpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoverySpec) { + r.DiscoverySpec = vDiscoverySpec + } + vDiscoveryStatus := r.DiscoveryStatus + if vDiscoveryStatus == nil { + // note: explicitly not the empty object. 
+ vDiscoveryStatus = &AssetDiscoveryStatus{} + } + if err := postReadExtractAssetDiscoveryStatusFields(r, vDiscoveryStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoveryStatus) { + r.DiscoveryStatus = vDiscoveryStatus + } + return nil +} +func postReadExtractAssetResourceSpecFields(r *Asset, o *AssetResourceSpec) error { + return nil +} +func postReadExtractAssetResourceStatusFields(r *Asset, o *AssetResourceStatus) error { + return nil +} +func postReadExtractAssetSecurityStatusFields(r *Asset, o *AssetSecurityStatus) error { + return nil +} +func postReadExtractAssetDiscoverySpecFields(r *Asset, o *AssetDiscoverySpec) error { + vCsvOptions := o.CsvOptions + if vCsvOptions == nil { + // note: explicitly not the empty object. + vCsvOptions = &AssetDiscoverySpecCsvOptions{} + } + if err := extractAssetDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCsvOptions) { + o.CsvOptions = vCsvOptions + } + vJsonOptions := o.JsonOptions + if vJsonOptions == nil { + // note: explicitly not the empty object. + vJsonOptions = &AssetDiscoverySpecJsonOptions{} + } + if err := extractAssetDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vJsonOptions) { + o.JsonOptions = vJsonOptions + } + return nil +} +func postReadExtractAssetDiscoverySpecCsvOptionsFields(r *Asset, o *AssetDiscoverySpecCsvOptions) error { + return nil +} +func postReadExtractAssetDiscoverySpecJsonOptionsFields(r *Asset, o *AssetDiscoverySpecJsonOptions) error { + return nil +} +func postReadExtractAssetDiscoveryStatusFields(r *Asset, o *AssetDiscoveryStatus) error { + vStats := o.Stats + if vStats == nil { + // note: explicitly not the empty object. 
+ vStats = &AssetDiscoveryStatusStats{} + } + if err := extractAssetDiscoveryStatusStatsFields(r, vStats); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStats) { + o.Stats = vStats + } + return nil +} +func postReadExtractAssetDiscoveryStatusStatsFields(r *Asset, o *AssetDiscoveryStatusStats) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/client.go b/mmv1/third_party/terraform/services/dataplex/client.go new file mode 100644 index 000000000000..77a502b18760 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/client.go @@ -0,0 +1,18 @@ +package dataplex + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go b/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go new file mode 100644 index 000000000000..113ae996c819 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go @@ -0,0 +1,11 @@ +package dataplex + +// flattenZoneDiscoverySpecEnable flattens an instance of discovery spec from a JSON +// response object. 
+func flattenZoneDiscoverySpecEnable(c *Client, i any, _ *Zone) *bool { + v, ok := i.(bool) + if !ok { + v = false + } + return &v +} diff --git a/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl b/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl new file mode 100644 index 000000000000..73ec65e888fc --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl @@ -0,0 +1,598 @@ +package dataplex + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Lake struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + Description *string `json:"description"` + State *LakeStateEnum `json:"state"` + ServiceAccount *string `json:"serviceAccount"` + Metastore *LakeMetastore `json:"metastore"` + AssetStatus *LakeAssetStatus `json:"assetStatus"` + MetastoreStatus *LakeMetastoreStatus `json:"metastoreStatus"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *Lake) String() string { + return dcl.SprintResource(r) +} + +// The enum LakeStateEnum. +type LakeStateEnum string + +// LakeStateEnumRef returns a *LakeStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func LakeStateEnumRef(s string) *LakeStateEnum { + v := LakeStateEnum(s) + return &v +} + +func (v LakeStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "ACTIVE", "CREATING", "DELETING", "ACTION_REQUIRED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "LakeStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum LakeMetastoreStatusStateEnum. +type LakeMetastoreStatusStateEnum string + +// LakeMetastoreStatusStateEnumRef returns a *LakeMetastoreStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func LakeMetastoreStatusStateEnumRef(s string) *LakeMetastoreStatusStateEnum { + v := LakeMetastoreStatusStateEnum(s) + return &v +} + +func (v LakeMetastoreStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "NONE", "READY", "UPDATING", "ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "LakeMetastoreStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +type LakeMetastore struct { + empty bool `json:"-"` + Service *string `json:"service"` +} + +type jsonLakeMetastore LakeMetastore + +func (r *LakeMetastore) UnmarshalJSON(data []byte) error { + var res jsonLakeMetastore + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyLakeMetastore + } else { + + r.Service = res.Service + + } + return nil +} + +// This object is used to assert a desired state where this LakeMetastore is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyLakeMetastore *LakeMetastore = &LakeMetastore{empty: true} + +func (r *LakeMetastore) Empty() bool { + return r.empty +} + +func (r *LakeMetastore) String() string { + return dcl.SprintResource(r) +} + +func (r *LakeMetastore) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type LakeAssetStatus struct { + empty bool `json:"-"` + UpdateTime *string `json:"updateTime"` + ActiveAssets *int64 `json:"activeAssets"` + SecurityPolicyApplyingAssets *int64 `json:"securityPolicyApplyingAssets"` +} + +type jsonLakeAssetStatus LakeAssetStatus + +func (r *LakeAssetStatus) UnmarshalJSON(data []byte) error { + var res jsonLakeAssetStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyLakeAssetStatus + } else { + + r.UpdateTime = res.UpdateTime + + r.ActiveAssets = res.ActiveAssets + + r.SecurityPolicyApplyingAssets = res.SecurityPolicyApplyingAssets + + } + return nil +} + +// This object is used to assert a desired state where this LakeAssetStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyLakeAssetStatus *LakeAssetStatus = &LakeAssetStatus{empty: true} + +func (r *LakeAssetStatus) Empty() bool { + return r.empty +} + +func (r *LakeAssetStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *LakeAssetStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type LakeMetastoreStatus struct { + empty bool `json:"-"` + State *LakeMetastoreStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` + Endpoint *string `json:"endpoint"` +} + +type jsonLakeMetastoreStatus LakeMetastoreStatus + +func (r *LakeMetastoreStatus) UnmarshalJSON(data []byte) error { + var res jsonLakeMetastoreStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyLakeMetastoreStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + r.Endpoint = res.Endpoint + + } + return nil +} + +// This object is used to assert a desired state where this LakeMetastoreStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyLakeMetastoreStatus *LakeMetastoreStatus = &LakeMetastoreStatus{empty: true} + +func (r *LakeMetastoreStatus) Empty() bool { + return r.empty +} + +func (r *LakeMetastoreStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *LakeMetastoreStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Lake) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataplex", + Type: "Lake", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "dataplex", +{{- end }} + } +} + +func (r *Lake) ID() (string, error) { + if err := extractLakeFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "description": dcl.ValueOrEmptyString(nr.Description), + "state": dcl.ValueOrEmptyString(nr.State), + "service_account": dcl.ValueOrEmptyString(nr.ServiceAccount), + "metastore": dcl.ValueOrEmptyString(nr.Metastore), + "asset_status": dcl.ValueOrEmptyString(nr.AssetStatus), + "metastore_status": dcl.ValueOrEmptyString(nr.MetastoreStatus), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const LakeMaxPage = -1 + +type LakeList struct { + Items []*Lake + + nextToken 
string + + pageSize int32 + + resource *Lake +} + +func (l *LakeList) HasNext() bool { + return l.nextToken != "" +} + +func (l *LakeList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listLake(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListLake(ctx context.Context, project, location string) (*LakeList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListLakeWithMaxResults(ctx, project, location, LakeMaxPage) + +} + +func (c *Client) ListLakeWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*LakeList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Lake{ + Project: &project, + Location: &location, + } + items, token, err := c.listLake(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &LakeList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetLake(ctx context.Context, r *Lake) (*Lake, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractLakeFields(r) + + b, err := c.getLakeRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalLake(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeLakeNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractLakeFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteLake(ctx context.Context, r *Lake) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Lake resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Lake...") + deleteOp := deleteLakeOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllLake deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllLake(ctx context.Context, project, location string, filter func(*Lake) bool) error {
+	listObj, err := c.ListLake(ctx, project, location)
+	if err != nil {
+		return err
+	}
+
+	err = c.deleteAllLake(ctx, filter, listObj.Items)
+	if err != nil {
+		return err
+	}
+	for listObj.HasNext() {
+		err = listObj.Next(ctx, c)
+		if err != nil {
+			return err // propagate pagination failure instead of silently reporting success (was: return nil)
+		}
+		err = c.deleteAllLake(ctx, filter, listObj.Items)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *Client) ApplyLake(ctx context.Context, rawDesired *Lake, opts ...dcl.ApplyOption) (*Lake, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	ctx = dcl.ContextWithRequestID(ctx)
+	var resultNewState *Lake
+	err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
+		newState, err := applyLakeHelper(c, ctx, rawDesired, opts...)
+		resultNewState = newState
+		if err != nil {
+			// If the error is 409, there is conflict in resource update.
+			// Here we want to apply changes based on latest state.
+			if dcl.IsConflictError(err) {
+				return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err}
+			}
+			return nil, err
+		}
+		return nil, nil
+	}, c.Config.RetryProvider)
+	return resultNewState, err
+}
+
+func applyLakeHelper(c *Client, ctx context.Context, rawDesired *Lake, opts ...dcl.ApplyOption) (*Lake, error) {
+	c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyLake...")
+	c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired)
+
+	// 1.1: Validation of user-specified fields in desired state.
+	if err := rawDesired.validate(); err != nil {
+		return nil, err
+	}
+
+	if err := extractLakeFields(rawDesired); err != nil {
+		return nil, err
+	}
+
+	initial, desired, fieldDiffs, err := c.lakeDiffsForRawDesired(ctx, rawDesired, opts...)
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToLakeDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []lakeApiOperation + if create { + ops = append(ops, &createLakeOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyLakeDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyLakeDiff(c *Client, ctx context.Context, desired *Lake, rawDesired *Lake, ops []lakeApiOperation, opts ...dcl.ApplyOption) (*Lake, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetLake(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createLakeOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapLake(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeLakeNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeLakeNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeLakeDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractLakeFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractLakeFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffLake(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Lake) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/lake_internal.go.tmpl b/mmv1/third_party/terraform/services/dataplex/lake_internal.go.tmpl new file mode 100644 index 000000000000..ee2368b61e26 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/lake_internal.go.tmpl @@ -0,0 +1,2021 @@ +package dataplex + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Lake) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if 
!dcl.IsEmptyValueIndirect(r.Metastore) { + if err := r.Metastore.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AssetStatus) { + if err := r.AssetStatus.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MetastoreStatus) { + if err := r.MetastoreStatus.validate(); err != nil { + return err + } + } + return nil +} +func (r *LakeMetastore) validate() error { + return nil +} +func (r *LakeAssetStatus) validate() error { + return nil +} +func (r *LakeMetastoreStatus) validate() error { + return nil +} +func (r *Lake) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://dataplex.googleapis.com/v1/", params) +} + +func (r *Lake) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Lake) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes", nr.basePath(), userBasePath, params), nil + +} + +func (r *Lake) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes?lakeId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r 
*Lake) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Lake) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Lake) SetPolicyVerb() string { + return "" +} + +func (r *Lake) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Lake) IAMPolicyVersion() int { + return 3 +} + +// lakeApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type lakeApiOperation interface { + do(context.Context, *Lake, *Client) error +} + +// newUpdateLakeUpdateLakeRequest creates a request for an +// Lake resource's UpdateLake update type by filling in the update +// fields based on the intended state of the resource. 
// newUpdateLakeUpdateLakeRequest builds the JSON request body for the
// UpdateLake operation from the desired state of the resource.
// NOTE(review): "name" is assigned twice — first via dcl.DeriveField, then
// unconditionally overwritten with the fully-qualified name below; the final
// fmt.Sprintf value is what is sent.
func newUpdateLakeUpdateLakeRequest(ctx context.Context, f *Lake, c *Client) (map[string]interface{}, error) {
	req := map[string]interface{}{}
	res := f
	_ = res

	if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["name"] = v
	}
	if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) {
		req["displayName"] = v
	}
	if v := f.Labels; !dcl.IsEmptyValueIndirect(v) {
		req["labels"] = v
	}
	if v := f.Description; !dcl.IsEmptyValueIndirect(v) {
		req["description"] = v
	}
	if v, err := expandLakeMetastore(c, f.Metastore, res); err != nil {
		return nil, fmt.Errorf("error expanding Metastore into metastore: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["metastore"] = v
	}
	if v, err := expandLakeAssetStatus(c, f.AssetStatus, res); err != nil {
		return nil, fmt.Errorf("error expanding AssetStatus into assetStatus: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["assetStatus"] = v
	}
	if v, err := expandLakeMetastoreStatus(c, f.MetastoreStatus, res); err != nil {
		return nil, fmt.Errorf("error expanding MetastoreStatus into metastoreStatus: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["metastoreStatus"] = v
	}
	req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s", *f.Project, *f.Location, *f.Name)

	return req, nil
}

// marshalUpdateLakeUpdateLakeRequest converts the update into
// the final JSON request body.
func marshalUpdateLakeUpdateLakeRequest(c *Client, m map[string]interface{}) ([]byte, error) {

	return json.Marshal(m)
}

type updateLakeUpdateLakeOperation struct {
	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
	// Usually it will be nil - this is to prevent us from accidentally depending on apply
	// options, which should usually be unnecessary.
	ApplyOptions []dcl.ApplyOption
	FieldDiffs   []*dcl.FieldDiff
}

// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL.
// NOTE(review): this specific operation actually issues a PATCH with an
// updateMask query parameter, then waits on the returned long-running
// operation — the generic comment above is generator boilerplate.

func (op *updateLakeUpdateLakeOperation) do(ctx context.Context, r *Lake, c *Client) error {
	// Confirm the resource exists before attempting the update.
	_, err := c.GetLake(ctx, r)
	if err != nil {
		return err
	}

	u, err := r.updateURL(c.Config.BasePath, "UpdateLake")
	if err != nil {
		return err
	}
	// Restrict the PATCH to only the fields that actually differ.
	mask := dcl.UpdateMask(op.FieldDiffs)
	u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask})
	if err != nil {
		return err
	}

	req, err := newUpdateLakeUpdateLakeRequest(ctx, r, c)
	if err != nil {
		return err
	}

	c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
	body, err := marshalUpdateLakeUpdateLakeRequest(c, req)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
	if err != nil {
		return err
	}

	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	// Block until the long-running operation completes; polling requests are
	// excluded from request logging via DoNotLogRequestsKey.
	err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET")

	if err != nil {
		return err
	}

	return nil
}

// listLakeRaw issues the List request for Lake and returns the raw response
// body. The pageSize parameter is only sent when it differs from LakeMaxPage.
func (c *Client) listLakeRaw(ctx context.Context, r *Lake, pageToken string, pageSize int32) ([]byte, error) {
	u, err := r.urlNormalized().listURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}

	m := make(map[string]string)
	if pageToken != "" {
		m["pageToken"] = pageToken
	}

	if pageSize != LakeMaxPage {
		m["pageSize"] = fmt.Sprintf("%v", pageSize)
	}

	u, err = dcl.AddQueryParams(u, m)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}

// listLakeOperation is the JSON shape of a single List response page.
type listLakeOperation struct {
	Lakes []map[string]interface{} `json:"lakes"`
	Token string                   `json:"nextPageToken"`
}

// listLake fetches one page of Lake resources and returns them together with
// the next-page token (empty when there are no further pages).
func (c *Client) listLake(ctx context.Context, r *Lake, pageToken string, pageSize int32) ([]*Lake, string, error) {
	b, err := c.listLakeRaw(ctx, r, pageToken, pageSize)
	if err != nil {
		return nil, "", err
	}

	var m listLakeOperation
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, "", err
	}

	var l []*Lake
	for _, v := range m.Lakes {
		res, err := unmarshalMapLake(v, c, r)
		if err != nil {
			return nil, m.Token, err
		}
		// Project and Location come from the request resource, not the API
		// response, so each returned item is fully addressable.
		res.Project = r.Project
		res.Location = r.Location
		l = append(l, res)
	}

	return l, m.Token, nil
}

// deleteAllLake deletes every resource in resources for which the filter f
// returns true. Individual deletion errors are collected and joined rather
// than aborting the sweep.
func (c *Client) deleteAllLake(ctx context.Context, f func(*Lake) bool, resources []*Lake) error {
	var errors []string
	for _, res := range resources {
		if f(res) {
			// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
			err := c.DeleteLake(ctx, res)
			if err != nil {
				errors = append(errors, err.Error())
			}
		}
	}
	if len(errors) > 0 {
		return fmt.Errorf("%v", strings.Join(errors, "\n"))
	} else {
		return nil
	}
}

type deleteLakeOperation struct{}

// do deletes the Lake (treating not-found as success), waits on the returned
// long-running operation, and then polls Get until the resource is confirmed
// gone, bounded by a fixed retry budget.
func (op *deleteLakeOperation) do(ctx context.Context, r *Lake, c *Client) error {
	r, err := c.GetLake(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			c.Config.Logger.InfoWithContextf(ctx, "Lake not found, returning. Original error: %v", err)
			return nil
		}
		c.Config.Logger.WarningWithContextf(ctx, "GetLake checking for existence. error: %v", err)
		return err
	}

	u, err := r.deleteURL(c.Config.BasePath)
	if err != nil {
		return err
	}

	// Delete should never have a body
	body := &bytes.Buffer{}
	resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
	if err != nil {
		return err
	}

	// wait for object to be deleted.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		return err
	}

	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
	// This is the reason we are adding retry to handle that case.
	retriesRemaining := 10
	dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
		_, err := c.GetLake(ctx, r)
		if dcl.IsNotFound(err) {
			return nil, nil
		}
		if retriesRemaining > 0 {
			retriesRemaining--
			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
		}
		return nil, dcl.NotDeletedError{ExistingResource: r}
	}, c.Config.RetryProvider)
	return nil
}

// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
// createLakeOperation retains the terminal response of the create
// long-running operation so callers can read server-populated fields.
type createLakeOperation struct {
	response map[string]interface{}
}

// FirstResponse returns the operation's recorded first response and whether
// one is present.
func (op *createLakeOperation) FirstResponse() (map[string]interface{}, bool) {
	return op.response, len(op.response) > 0
}

// do POSTs the marshalled resource to the create URL, waits on the returned
// long-running operation, records its first response, and verifies the new
// resource is readable via Get.
func (op *createLakeOperation) do(ctx context.Context, r *Lake, c *Client) error {
	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
	u, err := r.createURL(c.Config.BasePath)
	if err != nil {
		return err
	}

	req, err := r.marshal(c)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// wait for object to be created.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err)
		return err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation")
	op.response, _ = o.FirstResponse()

	if _, err := c.GetLake(ctx, r); err != nil {
		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
		return err
	}

	return nil
}

// getLakeRaw issues a GET for the resource and returns the raw response body.
func (c *Client) getLakeRaw(ctx context.Context, r *Lake) ([]byte, error) {

	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	b, err := ioutil.ReadAll(resp.Response.Body)
	if err != nil {
		return nil, err
	}

	return b, nil
}

// lakeDiffsForRawDesired fetches the current state of the resource (honoring
// any state hint in opts), canonicalizes both the initial and desired states,
// and returns them together with the field diffs between the two. When the
// resource does not exist yet, initial and diffs are nil.
func (c *Client) lakeDiffsForRawDesired(ctx context.Context, rawDesired *Lake, opts ...dcl.ApplyOption) (initial, desired *Lake, diffs []*dcl.FieldDiff, err error) {
	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
	var fetchState *Lake
	if sh := dcl.FetchStateHint(opts); sh != nil {
		if r, ok := sh.(*Lake); !ok {
			c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Lake, got %T", sh)
		} else {
			fetchState = r
		}
	}
	if fetchState == nil {
		fetchState = rawDesired
	}

	// 1.2: Retrieval of raw initial state from API
	rawInitial, err := c.GetLake(ctx, fetchState)
	if rawInitial == nil {
		if !dcl.IsNotFound(err) {
			c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Lake resource already exists: %s", err)
			return nil, nil, nil, fmt.Errorf("failed to retrieve Lake resource: %v", err)
		}
		c.Config.Logger.InfoWithContext(ctx, "Found that Lake resource did not exist.")
		// Perform canonicalization to pick up defaults.
		desired, err = canonicalizeLakeDesiredState(rawDesired, rawInitial)
		return nil, desired, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Lake: %v", rawInitial)
	c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Lake: %v", rawDesired)

	// The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
	if err := extractLakeFields(rawInitial); err != nil {
		return nil, nil, nil, err
	}

	// 1.3: Canonicalize raw initial state into initial state.
	initial, err = canonicalizeLakeInitialState(rawInitial, rawDesired)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Lake: %v", initial)

	// 1.4: Canonicalize raw desired state into desired state.
	desired, err = canonicalizeLakeDesiredState(rawDesired, rawInitial, opts...)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Lake: %v", desired)

	// 2.1: Comparison of initial and desired state.
	diffs, err = diffLake(c, desired, initial, opts...)
	return initial, desired, diffs, err
}

// canonicalizeLakeInitialState currently passes the fetched state through
// unchanged.
func canonicalizeLakeInitialState(rawInitial, rawDesired *Lake) (*Lake, error) {
	// TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
	return rawInitial, nil
}

/*
* Canonicalizers
*
* These are responsible for converting either a user-specified config or a
* GCP API response to a standard format that can be used for difference checking.
* */

// canonicalizeLakeDesiredState merges the user-specified desired state with
// the fetched initial state, preferring the initial value for any field whose
// desired value is equivalent after normalization.
func canonicalizeLakeDesiredState(rawDesired, rawInitial *Lake, opts ...dcl.ApplyOption) (*Lake, error) {

	if rawInitial == nil {
		// Since the initial state is empty, the desired state is all we have.
		// We canonicalize the remaining nested objects with nil to pick up defaults.
		rawDesired.Metastore = canonicalizeLakeMetastore(rawDesired.Metastore, nil, opts...)
		rawDesired.AssetStatus = canonicalizeLakeAssetStatus(rawDesired.AssetStatus, nil, opts...)
		rawDesired.MetastoreStatus = canonicalizeLakeMetastoreStatus(rawDesired.MetastoreStatus, nil, opts...)

		return rawDesired, nil
	}
	canonicalDesired := &Lake{}
	if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) {
		canonicalDesired.DisplayName = rawInitial.DisplayName
	} else {
		canonicalDesired.DisplayName = rawDesired.DisplayName
	}
	if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Labels = rawInitial.Labels
	} else {
		canonicalDesired.Labels = rawDesired.Labels
	}
	if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) {
		canonicalDesired.Description = rawInitial.Description
	} else {
		canonicalDesired.Description = rawDesired.Description
	}
	canonicalDesired.Metastore = canonicalizeLakeMetastore(rawDesired.Metastore, rawInitial.Metastore, opts...)
	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
		canonicalDesired.Project = rawInitial.Project
	} else {
		canonicalDesired.Project = rawDesired.Project
	}
	if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) {
		canonicalDesired.Location = rawInitial.Location
	} else {
		canonicalDesired.Location = rawDesired.Location
	}
	return canonicalDesired, nil
}

// canonicalizeLakeNewState reconciles the freshly fetched state (rawNew) with
// the desired state: empty fetched fields are back-filled from desired, and
// equivalent values are normalized toward the desired spelling. Fields with
// empty else-branches (CreateTime, UpdateTime, Labels, State) keep the
// server-returned value as-is.
func canonicalizeLakeNewState(c *Client, rawNew, rawDesired *Lake) (*Lake, error) {

	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
		rawNew.Name = rawDesired.Name
	} else {
		if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) {
			rawNew.Name = rawDesired.Name
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) {
		rawNew.DisplayName = rawDesired.DisplayName
	} else {
		if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) {
			rawNew.DisplayName = rawDesired.DisplayName
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) {
		rawNew.Uid = rawDesired.Uid
	} else {
		if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) {
			rawNew.Uid = rawDesired.Uid
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
		rawNew.CreateTime = rawDesired.CreateTime
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) {
		rawNew.UpdateTime = rawDesired.UpdateTime
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) {
		rawNew.Labels = rawDesired.Labels
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) {
		rawNew.Description = rawDesired.Description
	} else {
		if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) {
			rawNew.Description = rawDesired.Description
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) {
		rawNew.State = rawDesired.State
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.ServiceAccount) && dcl.IsEmptyValueIndirect(rawDesired.ServiceAccount) {
		rawNew.ServiceAccount = rawDesired.ServiceAccount
	} else {
		if dcl.StringCanonicalize(rawDesired.ServiceAccount, rawNew.ServiceAccount) {
			rawNew.ServiceAccount = rawDesired.ServiceAccount
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.Metastore) && dcl.IsEmptyValueIndirect(rawDesired.Metastore) {
		rawNew.Metastore = rawDesired.Metastore
	} else {
		rawNew.Metastore = canonicalizeNewLakeMetastore(c, rawDesired.Metastore, rawNew.Metastore)
	}

	if dcl.IsEmptyValueIndirect(rawNew.AssetStatus) && dcl.IsEmptyValueIndirect(rawDesired.AssetStatus) {
		rawNew.AssetStatus = rawDesired.AssetStatus
	} else {
		rawNew.AssetStatus = canonicalizeNewLakeAssetStatus(c, rawDesired.AssetStatus, rawNew.AssetStatus)
	}

	if dcl.IsEmptyValueIndirect(rawNew.MetastoreStatus) && dcl.IsEmptyValueIndirect(rawDesired.MetastoreStatus) {
		rawNew.MetastoreStatus = rawDesired.MetastoreStatus
	} else {
		rawNew.MetastoreStatus = canonicalizeNewLakeMetastoreStatus(c, rawDesired.MetastoreStatus, rawNew.MetastoreStatus)
	}

	// Project and Location are never returned by the API; always take them
	// from the desired state.
	rawNew.Project = rawDesired.Project

	rawNew.Location = rawDesired.Location

	return rawNew, nil
}

// canonicalizeLakeMetastore merges a desired LakeMetastore with its initial
// state, preferring the initial value where the two are equivalent.
func canonicalizeLakeMetastore(des, initial *LakeMetastore, opts ...dcl.ApplyOption) *LakeMetastore {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over any initial value.
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &LakeMetastore{}

	if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) {
		cDes.Service = initial.Service
	} else {
		cDes.Service = des.Service
	}

	return cDes
}

// canonicalizeLakeMetastoreSlice applies canonicalizeLakeMetastore
// element-wise; when lengths differ, each desired element is canonicalized
// against nil instead.
func canonicalizeLakeMetastoreSlice(des, initial []LakeMetastore, opts ...dcl.ApplyOption) []LakeMetastore {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]LakeMetastore, 0, len(des))
		for _, d := range des {
			cd := canonicalizeLakeMetastore(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]LakeMetastore, 0, len(des))
	for i, d := range des {
		cd := canonicalizeLakeMetastore(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewLakeMetastore reconciles a fetched LakeMetastore (nw) with
// the desired one, normalizing equivalent strings toward the desired value.
func canonicalizeNewLakeMetastore(c *Client, des, nw *LakeMetastore) *LakeMetastore {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for LakeMetastore while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.Service, nw.Service) {
		nw.Service = des.Service
	}

	return nw
}

// canonicalizeNewLakeMetastoreSet canonicalizes set-semantics slices by
// matching desired elements to actual elements via diff comparison.
func canonicalizeNewLakeMetastoreSet(c *Client, des, nw []LakeMetastore) []LakeMetastore {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []LakeMetastore
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareLakeMetastoreNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewLakeMetastore(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewLakeMetastoreSlice canonicalizes list-semantics slices
// element-wise when lengths match.
func canonicalizeNewLakeMetastoreSlice(c *Client, des, nw []LakeMetastore) []LakeMetastore {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []LakeMetastore
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewLakeMetastore(c, &d, &n))
	}

	return items
}

// canonicalizeLakeAssetStatus merges a desired LakeAssetStatus with its
// initial state, preferring the initial value where the two are equivalent.
func canonicalizeLakeAssetStatus(des, initial *LakeAssetStatus, opts ...dcl.ApplyOption) *LakeAssetStatus {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &LakeAssetStatus{}

	if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.UpdateTime = initial.UpdateTime
	} else {
		cDes.UpdateTime = des.UpdateTime
	}
	if dcl.IsZeroValue(des.ActiveAssets) || (dcl.IsEmptyValueIndirect(des.ActiveAssets) && dcl.IsEmptyValueIndirect(initial.ActiveAssets)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.ActiveAssets = initial.ActiveAssets
	} else {
		cDes.ActiveAssets = des.ActiveAssets
	}
	if dcl.IsZeroValue(des.SecurityPolicyApplyingAssets) || (dcl.IsEmptyValueIndirect(des.SecurityPolicyApplyingAssets) && dcl.IsEmptyValueIndirect(initial.SecurityPolicyApplyingAssets)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.SecurityPolicyApplyingAssets = initial.SecurityPolicyApplyingAssets
	} else {
		cDes.SecurityPolicyApplyingAssets = des.SecurityPolicyApplyingAssets
	}

	return cDes
}

// canonicalizeLakeAssetStatusSlice applies canonicalizeLakeAssetStatus
// element-wise; when lengths differ, each desired element is canonicalized
// against nil instead.
func canonicalizeLakeAssetStatusSlice(des, initial []LakeAssetStatus, opts ...dcl.ApplyOption) []LakeAssetStatus {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]LakeAssetStatus, 0, len(des))
		for _, d := range des {
			cd := canonicalizeLakeAssetStatus(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]LakeAssetStatus, 0, len(des))
	for i, d := range des {
		cd := canonicalizeLakeAssetStatus(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewLakeAssetStatus reconciles a fetched LakeAssetStatus with
// the desired one. No field-level normalization is performed here — all
// fields of this object are non-string types.
func canonicalizeNewLakeAssetStatus(c *Client, des, nw *LakeAssetStatus) *LakeAssetStatus {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for LakeAssetStatus while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNewLakeAssetStatusSet canonicalizes set-semantics slices by
// matching desired elements to actual elements via diff comparison.
func canonicalizeNewLakeAssetStatusSet(c *Client, des, nw []LakeAssetStatus) []LakeAssetStatus {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []LakeAssetStatus
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareLakeAssetStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewLakeAssetStatus(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewLakeAssetStatusSlice canonicalizes list-semantics slices
// element-wise when lengths match.
func canonicalizeNewLakeAssetStatusSlice(c *Client, des, nw []LakeAssetStatus) []LakeAssetStatus {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []LakeAssetStatus
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewLakeAssetStatus(c, &d, &n))
	}

	return items
}

// canonicalizeLakeMetastoreStatus merges a desired LakeMetastoreStatus with
// its initial state, preferring the initial value where the two are
// equivalent.
func canonicalizeLakeMetastoreStatus(des, initial *LakeMetastoreStatus, opts ...dcl.ApplyOption) *LakeMetastoreStatus {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &LakeMetastoreStatus{}

	if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.State = initial.State
	} else {
		cDes.State = des.State
	}
	if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) {
		cDes.Message = initial.Message
	} else {
		cDes.Message = des.Message
	}
	if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.UpdateTime = initial.UpdateTime
	} else {
		cDes.UpdateTime = des.UpdateTime
	}
	if dcl.StringCanonicalize(des.Endpoint, initial.Endpoint) || dcl.IsZeroValue(des.Endpoint) {
		cDes.Endpoint = initial.Endpoint
	} else {
		cDes.Endpoint = des.Endpoint
	}

	return cDes
}

// canonicalizeLakeMetastoreStatusSlice applies
// canonicalizeLakeMetastoreStatus element-wise; when lengths differ, each
// desired element is canonicalized against nil instead.
func canonicalizeLakeMetastoreStatusSlice(des, initial []LakeMetastoreStatus, opts ...dcl.ApplyOption) []LakeMetastoreStatus {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]LakeMetastoreStatus, 0, len(des))
		for _, d := range des {
			cd := canonicalizeLakeMetastoreStatus(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]LakeMetastoreStatus, 0, len(des))
	for i, d := range des {
		cd := canonicalizeLakeMetastoreStatus(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewLakeMetastoreStatus reconciles a fetched LakeMetastoreStatus
// with the desired one, normalizing equivalent strings toward the desired
// value.
func canonicalizeNewLakeMetastoreStatus(c *Client, des, nw *LakeMetastoreStatus) *LakeMetastoreStatus {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for LakeMetastoreStatus while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.Message, nw.Message) {
		nw.Message = des.Message
	}
	if dcl.StringCanonicalize(des.Endpoint, nw.Endpoint) {
		nw.Endpoint = des.Endpoint
	}

	return nw
}

// canonicalizeNewLakeMetastoreStatusSet canonicalizes set-semantics slices by
// matching desired elements to actual elements via diff comparison.
func canonicalizeNewLakeMetastoreStatusSet(c *Client, des, nw []LakeMetastoreStatus) []LakeMetastoreStatus {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []LakeMetastoreStatus
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareLakeMetastoreStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewLakeMetastoreStatus(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewLakeMetastoreStatusSlice canonicalizes list-semantics slices
// element-wise when lengths match.
func canonicalizeNewLakeMetastoreStatusSlice(c *Client, des, nw []LakeMetastoreStatus) []LakeMetastoreStatus {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []LakeMetastoreStatus
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewLakeMetastoreStatus(c, &d, &n))
	}

	return items
}

// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
// diffLake computes field-level differences between the desired and actual
// Lake states. Each diff carries an operation selector: mutable fields
// trigger updateLakeUpdateLakeOperation, while output-only or immutable
// fields (Uid, CreateTime, UpdateTime, State, ServiceAccount, AssetStatus,
// MetastoreStatus, Project, Location) require recreate.
func diffLake(c *Client, desired, actual *Lake, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Metastore, actual.Metastore, dcl.DiffInfo{ObjectFunction: compareLakeMetastoreNewStyle, EmptyObject: EmptyLakeMetastore, OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Metastore")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.AssetStatus, actual.AssetStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareLakeAssetStatusNewStyle, EmptyObject: EmptyLakeAssetStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AssetStatus")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MetastoreStatus, actual.MetastoreStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareLakeMetastoreStatusNewStyle, EmptyObject: EmptyLakeMetastoreStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreStatus")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// compareLakeMetastoreNewStyle is the dcl.Diff ObjectFunction for
// LakeMetastore; it accepts either value or pointer arguments.
func compareLakeMetastoreNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*LakeMetastore)
	if !ok {
		desiredNotPointer, ok := d.(LakeMetastore)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastore or *LakeMetastore", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*LakeMetastore)
	if !ok {
		actualNotPointer, ok := a.(LakeMetastore)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastore", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareLakeAssetStatusNewStyle is the dcl.Diff ObjectFunction for
// LakeAssetStatus; it accepts either value or pointer arguments.
func compareLakeAssetStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*LakeAssetStatus)
	if !ok {
		desiredNotPointer, ok := d.(LakeAssetStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeAssetStatus or *LakeAssetStatus", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*LakeAssetStatus)
	if !ok {
		actualNotPointer, ok := a.(LakeAssetStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeAssetStatus", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ActiveAssets, actual.ActiveAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("ActiveAssets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SecurityPolicyApplyingAssets, actual.SecurityPolicyApplyingAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("SecurityPolicyApplyingAssets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareLakeMetastoreStatusNewStyle is the dcl.Diff ObjectFunction for
// LakeMetastoreStatus; it accepts either value or pointer arguments.
func compareLakeMetastoreStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*LakeMetastoreStatus)
	if !ok {
		desiredNotPointer, ok := d.(LakeMetastoreStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastoreStatus or *LakeMetastoreStatus", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*LakeMetastoreStatus)
	if !ok {
		actualNotPointer, ok := a.(LakeMetastoreStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastoreStatus", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
+func (r *Lake) urlNormalized() *Lake { + normalized := dcl.Copy(*r).(Lake) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.ServiceAccount = dcl.SelfLinkToName(r.ServiceAccount) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Lake) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateLake" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Lake resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Lake) marshal(c *Client) ([]byte, error) { + m, err := expandLake(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Lake: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalLake decodes JSON responses into the Lake resource schema. 
+func unmarshalLake(b []byte, c *Client, res *Lake) (*Lake, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapLake(m, c, res) +} + +func unmarshalMapLake(m map[string]interface{}, c *Client, res *Lake) (*Lake, error) { + + flattened := flattenLake(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandLake expands Lake into a JSON request object. +func expandLake(c *Client, f *Lake) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v, err := expandLakeMetastore(c, f.Metastore, res); err != nil { + return nil, fmt.Errorf("error expanding Metastore into metastore: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["metastore"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenLake flattens Lake from a JSON request object into the +// Lake type. 
+func flattenLake(c *Client, i interface{}, res *Lake) *Lake { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Lake{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.State = flattenLakeStateEnum(m["state"]) + resultRes.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) + resultRes.Metastore = flattenLakeMetastore(c, m["metastore"], res) + resultRes.AssetStatus = flattenLakeAssetStatus(c, m["assetStatus"], res) + resultRes.MetastoreStatus = flattenLakeMetastoreStatus(c, m["metastoreStatus"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// expandLakeMetastoreMap expands the contents of LakeMetastore into a JSON +// request object. +func expandLakeMetastoreMap(c *Client, f map[string]LakeMetastore, res *Lake) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandLakeMetastore(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandLakeMetastoreSlice expands the contents of LakeMetastore into a JSON +// request object. 
+func expandLakeMetastoreSlice(c *Client, f []LakeMetastore, res *Lake) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandLakeMetastore(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenLakeMetastoreMap flattens the contents of LakeMetastore from a JSON +// response object. +func flattenLakeMetastoreMap(c *Client, i interface{}, res *Lake) map[string]LakeMetastore { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeMetastore{} + } + + if len(a) == 0 { + return map[string]LakeMetastore{} + } + + items := make(map[string]LakeMetastore) + for k, item := range a { + items[k] = *flattenLakeMetastore(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenLakeMetastoreSlice flattens the contents of LakeMetastore from a JSON +// response object. +func flattenLakeMetastoreSlice(c *Client, i interface{}, res *Lake) []LakeMetastore { + a, ok := i.([]interface{}) + if !ok { + return []LakeMetastore{} + } + + if len(a) == 0 { + return []LakeMetastore{} + } + + items := make([]LakeMetastore, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeMetastore(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandLakeMetastore expands an instance of LakeMetastore into a JSON +// request object. +func expandLakeMetastore(c *Client, f *LakeMetastore, res *Lake) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + + return m, nil +} + +// flattenLakeMetastore flattens an instance of LakeMetastore from a JSON +// response object. 
+func flattenLakeMetastore(c *Client, i interface{}, res *Lake) *LakeMetastore { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &LakeMetastore{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyLakeMetastore + } + r.Service = dcl.FlattenString(m["service"]) + + return r +} + +// expandLakeAssetStatusMap expands the contents of LakeAssetStatus into a JSON +// request object. +func expandLakeAssetStatusMap(c *Client, f map[string]LakeAssetStatus, res *Lake) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandLakeAssetStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandLakeAssetStatusSlice expands the contents of LakeAssetStatus into a JSON +// request object. +func expandLakeAssetStatusSlice(c *Client, f []LakeAssetStatus, res *Lake) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandLakeAssetStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenLakeAssetStatusMap flattens the contents of LakeAssetStatus from a JSON +// response object. +func flattenLakeAssetStatusMap(c *Client, i interface{}, res *Lake) map[string]LakeAssetStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeAssetStatus{} + } + + if len(a) == 0 { + return map[string]LakeAssetStatus{} + } + + items := make(map[string]LakeAssetStatus) + for k, item := range a { + items[k] = *flattenLakeAssetStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenLakeAssetStatusSlice flattens the contents of LakeAssetStatus from a JSON +// response object. 
+func flattenLakeAssetStatusSlice(c *Client, i interface{}, res *Lake) []LakeAssetStatus { + a, ok := i.([]interface{}) + if !ok { + return []LakeAssetStatus{} + } + + if len(a) == 0 { + return []LakeAssetStatus{} + } + + items := make([]LakeAssetStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeAssetStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandLakeAssetStatus expands an instance of LakeAssetStatus into a JSON +// request object. +func expandLakeAssetStatus(c *Client, f *LakeAssetStatus, res *Lake) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + if v := f.ActiveAssets; !dcl.IsEmptyValueIndirect(v) { + m["activeAssets"] = v + } + if v := f.SecurityPolicyApplyingAssets; !dcl.IsEmptyValueIndirect(v) { + m["securityPolicyApplyingAssets"] = v + } + + return m, nil +} + +// flattenLakeAssetStatus flattens an instance of LakeAssetStatus from a JSON +// response object. +func flattenLakeAssetStatus(c *Client, i interface{}, res *Lake) *LakeAssetStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &LakeAssetStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyLakeAssetStatus + } + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + r.ActiveAssets = dcl.FlattenInteger(m["activeAssets"]) + r.SecurityPolicyApplyingAssets = dcl.FlattenInteger(m["securityPolicyApplyingAssets"]) + + return r +} + +// expandLakeMetastoreStatusMap expands the contents of LakeMetastoreStatus into a JSON +// request object. 
+func expandLakeMetastoreStatusMap(c *Client, f map[string]LakeMetastoreStatus, res *Lake) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandLakeMetastoreStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandLakeMetastoreStatusSlice expands the contents of LakeMetastoreStatus into a JSON +// request object. +func expandLakeMetastoreStatusSlice(c *Client, f []LakeMetastoreStatus, res *Lake) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandLakeMetastoreStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenLakeMetastoreStatusMap flattens the contents of LakeMetastoreStatus from a JSON +// response object. +func flattenLakeMetastoreStatusMap(c *Client, i interface{}, res *Lake) map[string]LakeMetastoreStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeMetastoreStatus{} + } + + if len(a) == 0 { + return map[string]LakeMetastoreStatus{} + } + + items := make(map[string]LakeMetastoreStatus) + for k, item := range a { + items[k] = *flattenLakeMetastoreStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenLakeMetastoreStatusSlice flattens the contents of LakeMetastoreStatus from a JSON +// response object. 
+func flattenLakeMetastoreStatusSlice(c *Client, i interface{}, res *Lake) []LakeMetastoreStatus { + a, ok := i.([]interface{}) + if !ok { + return []LakeMetastoreStatus{} + } + + if len(a) == 0 { + return []LakeMetastoreStatus{} + } + + items := make([]LakeMetastoreStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeMetastoreStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandLakeMetastoreStatus expands an instance of LakeMetastoreStatus into a JSON +// request object. +func expandLakeMetastoreStatus(c *Client, f *LakeMetastoreStatus, res *Lake) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + if v := f.Endpoint; !dcl.IsEmptyValueIndirect(v) { + m["endpoint"] = v + } + + return m, nil +} + +// flattenLakeMetastoreStatus flattens an instance of LakeMetastoreStatus from a JSON +// response object. +func flattenLakeMetastoreStatus(c *Client, i interface{}, res *Lake) *LakeMetastoreStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &LakeMetastoreStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyLakeMetastoreStatus + } + r.State = flattenLakeMetastoreStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + r.Endpoint = dcl.FlattenString(m["endpoint"]) + + return r +} + +// flattenLakeStateEnumMap flattens the contents of LakeStateEnum from a JSON +// response object. 
+func flattenLakeStateEnumMap(c *Client, i interface{}, res *Lake) map[string]LakeStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeStateEnum{} + } + + if len(a) == 0 { + return map[string]LakeStateEnum{} + } + + items := make(map[string]LakeStateEnum) + for k, item := range a { + items[k] = *flattenLakeStateEnum(item.(interface{})) + } + + return items +} + +// flattenLakeStateEnumSlice flattens the contents of LakeStateEnum from a JSON +// response object. +func flattenLakeStateEnumSlice(c *Client, i interface{}, res *Lake) []LakeStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []LakeStateEnum{} + } + + if len(a) == 0 { + return []LakeStateEnum{} + } + + items := make([]LakeStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeStateEnum(item.(interface{}))) + } + + return items +} + +// flattenLakeStateEnum asserts that an interface is a string, and returns a +// pointer to a *LakeStateEnum with the same value as that string. +func flattenLakeStateEnum(i interface{}) *LakeStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return LakeStateEnumRef(s) +} + +// flattenLakeMetastoreStatusStateEnumMap flattens the contents of LakeMetastoreStatusStateEnum from a JSON +// response object. +func flattenLakeMetastoreStatusStateEnumMap(c *Client, i interface{}, res *Lake) map[string]LakeMetastoreStatusStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeMetastoreStatusStateEnum{} + } + + if len(a) == 0 { + return map[string]LakeMetastoreStatusStateEnum{} + } + + items := make(map[string]LakeMetastoreStatusStateEnum) + for k, item := range a { + items[k] = *flattenLakeMetastoreStatusStateEnum(item.(interface{})) + } + + return items +} + +// flattenLakeMetastoreStatusStateEnumSlice flattens the contents of LakeMetastoreStatusStateEnum from a JSON +// response object. 
+func flattenLakeMetastoreStatusStateEnumSlice(c *Client, i interface{}, res *Lake) []LakeMetastoreStatusStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []LakeMetastoreStatusStateEnum{} + } + + if len(a) == 0 { + return []LakeMetastoreStatusStateEnum{} + } + + items := make([]LakeMetastoreStatusStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeMetastoreStatusStateEnum(item.(interface{}))) + } + + return items +} + +// flattenLakeMetastoreStatusStateEnum asserts that an interface is a string, and returns a +// pointer to a *LakeMetastoreStatusStateEnum with the same value as that string. +func flattenLakeMetastoreStatusStateEnum(i interface{}) *LakeMetastoreStatusStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return LakeMetastoreStatusStateEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Lake) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalLake(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type lakeDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp lakeApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToLakeDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]lakeDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []lakeDiff + // For each operation name, create a lakeDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := lakeDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToLakeApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToLakeApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (lakeApiOperation, error) { + switch opName { + + case "updateLakeUpdateLakeOperation": + return &updateLakeUpdateLakeOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractLakeFields(r *Lake) error { + vMetastore := r.Metastore + if vMetastore == nil { + // note: explicitly not the empty object. + vMetastore = &LakeMetastore{} + } + if err := extractLakeMetastoreFields(r, vMetastore); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastore) { + r.Metastore = vMetastore + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. 
+ vAssetStatus = &LakeAssetStatus{} + } + if err := extractLakeAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + vMetastoreStatus := r.MetastoreStatus + if vMetastoreStatus == nil { + // note: explicitly not the empty object. + vMetastoreStatus = &LakeMetastoreStatus{} + } + if err := extractLakeMetastoreStatusFields(r, vMetastoreStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreStatus) { + r.MetastoreStatus = vMetastoreStatus + } + return nil +} +func extractLakeMetastoreFields(r *Lake, o *LakeMetastore) error { + return nil +} +func extractLakeAssetStatusFields(r *Lake, o *LakeAssetStatus) error { + return nil +} +func extractLakeMetastoreStatusFields(r *Lake, o *LakeMetastoreStatus) error { + return nil +} + +func postReadExtractLakeFields(r *Lake) error { + vMetastore := r.Metastore + if vMetastore == nil { + // note: explicitly not the empty object. + vMetastore = &LakeMetastore{} + } + if err := postReadExtractLakeMetastoreFields(r, vMetastore); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastore) { + r.Metastore = vMetastore + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. + vAssetStatus = &LakeAssetStatus{} + } + if err := postReadExtractLakeAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + vMetastoreStatus := r.MetastoreStatus + if vMetastoreStatus == nil { + // note: explicitly not the empty object. 
+ vMetastoreStatus = &LakeMetastoreStatus{} + } + if err := postReadExtractLakeMetastoreStatusFields(r, vMetastoreStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreStatus) { + r.MetastoreStatus = vMetastoreStatus + } + return nil +} +func postReadExtractLakeMetastoreFields(r *Lake, o *LakeMetastore) error { + return nil +} +func postReadExtractLakeAssetStatusFields(r *Lake, o *LakeAssetStatus) error { + return nil +} +func postReadExtractLakeMetastoreStatusFields(r *Lake, o *LakeMetastoreStatus) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/dataplex/provider_dcl_client_creation.go new file mode 100644 index 000000000000..93a630010fe0 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package dataplex + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLDataplexClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.DataplexBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset.go new file mode 100644 index 000000000000..068f9d9a77c6 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset.go @@ -0,0 +1,909 @@ +package dataplex + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataplexAsset() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexAssetCreate, + Read: resourceDataplexAssetRead, + Update: resourceDataplexAssetUpdate, + Delete: resourceDataplexAssetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexAssetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "dataplex_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The zone for the resource", + }, + + "discovery_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Specification of the discovery feature applied to data referenced by this asset. 
When this spec is left unset, the asset will use the spec set on the parent zone.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the asset.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Immutable. Specification of the resource that is referenced by this asset.", + MaxItems: 1, + Elem: DataplexAssetResourceSpecSchema(), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the asset.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was created.", + }, + + "discovery_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the discovery feature applied to data referenced by this asset.", + Elem: DataplexAssetDiscoveryStatusSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. 
User defined labels for the asset.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "resource_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the resource referenced by this asset.", + Elem: DataplexAssetResourceStatusSchema(), + }, + + "security_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the security policy applied to resource referenced by this asset.", + Elem: DataplexAssetSecurityStatusSchema(), + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was last updated.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. 
Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. 
For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexAssetDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexAssetResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET", + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Immutable. 
Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}`", + }, + + "read_access_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Determines how read permissions are handled for each asset and their associated tables. Only available to storage buckets assets. Possible values: DIRECT, MANAGED", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "last_run_duration": { + Type: schema.TypeString, + Computed: true, + Description: "The duration of the last discovery run.", + }, + + "last_run_time": { + Type: schema.TypeString, + Computed: true, + Description: "The start time of the last discovery run.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current status of the discovery feature. 
Possible values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED", + }, + + "stats": { + Type: schema.TypeList, + Computed: true, + Description: "Data Stats of the asset reported by discovery.", + Elem: DataplexAssetDiscoveryStatusStatsSchema(), + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusStatsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_items": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of data items within the referenced resource.", + }, + + "data_size": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of stored data bytes within the referenced resource.", + }, + + "filesets": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of fileset entities within the referenced resource.", + }, + + "tables": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of table entities within the referenced resource.", + }, + }, + } +} + +func DataplexAssetResourceStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the managed resource. 
Possible values: STATE_UNSPECIFIED, READY, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetSecurityStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the security policy applied to the attached resource. Possible values: STATE_UNSPECIFIED, READY, APPLYING, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished creating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := 
NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetAsset(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexAsset %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("dataplex_zone", res.DataplexZone); err != nil { + return fmt.Errorf("error setting dataplex_zone in state: %s", err) + } + if err = d.Set("discovery_spec", flattenDataplexAssetDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexAssetResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time 
in state: %s", err) + } + if err = d.Set("discovery_status", flattenDataplexAssetDiscoveryStatus(res.DiscoveryStatus)); err != nil { + return fmt.Errorf("error setting discovery_status in state: %s", err) + } + if err = d.Set("labels", flattenDataplexAssetLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("resource_status", flattenDataplexAssetResourceStatus(res.ResourceStatus)); err != nil { + return fmt.Errorf("error setting resource_status in state: %s", err) + } + if err = d.Set("security_status", flattenDataplexAssetSecurityStatus(res.SecurityStatus)); err != nil { + return fmt.Errorf("error setting security_status in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("terraform_labels", flattenDataplexAssetTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexAssetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: 
tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished creating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: 
dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Asset %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteAsset(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Asset: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Asset %q", d.Id()) + return nil +} + +func resourceDataplexAssetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexAssetDiscoverySpec(o interface{}) *AssetDiscoverySpec { + if o == nil { + return EmptyAssetDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyAssetDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return 
&AssetDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexAssetDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: tpgdclresource.ExpandStringArray(obj["exclude_patterns"]), + IncludePatterns: tpgdclresource.ExpandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexAssetDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.String(obj["schedule"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpec(obj *AssetDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + "csv_options": flattenDataplexAssetDiscoverySpecCsvOptions(obj.CsvOptions), + "exclude_patterns": obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexAssetDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecCsvOptions(o interface{}) *AssetDiscoverySpecCsvOptions { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &AssetDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexAssetDiscoverySpecCsvOptions(obj *AssetDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecJsonOptions(o interface{}) *AssetDiscoverySpecJsonOptions { + if o == nil { 
+ return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &AssetDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpecJsonOptions(obj *AssetDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetResourceSpec(o interface{}) *AssetResourceSpec { + if o == nil { + return EmptyAssetResourceSpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyAssetResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &AssetResourceSpec{ + Type: AssetResourceSpecTypeEnumRef(obj["type"].(string)), + Name: dcl.String(obj["name"].(string)), + ReadAccessMode: AssetResourceSpecReadAccessModeEnumRef(obj["read_access_mode"].(string)), + } +} + +func flattenDataplexAssetResourceSpec(obj *AssetResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "type": obj.Type, + "name": obj.Name, + "read_access_mode": obj.ReadAccessMode, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatus(obj *AssetDiscoveryStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "last_run_duration": obj.LastRunDuration, + "last_run_time": obj.LastRunTime, + "message": obj.Message, + "state": obj.State, + "stats": flattenDataplexAssetDiscoveryStatusStats(obj.Stats), + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatusStats(obj *AssetDiscoveryStatusStats) interface{} { 
+ if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "data_items": obj.DataItems, + "data_size": obj.DataSize, + "filesets": obj.Filesets, + "tables": obj.Tables, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetResourceStatus(obj *AssetResourceStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetSecurityStatus(obj *AssetSecurityStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataplexAssetTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go new file mode 100644 index 000000000000..fd11ca55f0ee --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go @@ -0,0 +1,234 @@ +package dataplex_test + +import ( + "context" + "fmt" + "strings" + "testing" + + 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccDataplexAsset_BasicAssetHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexAssetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexAsset_BasicAssetHandWritten(context), + }, + { + ResourceName: "google_dataplex_asset.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_spec.0.name", "labels", "terraform_labels"}, + }, + { + Config: testAccDataplexAsset_BasicAssetHandWrittenUpdate0(context), + }, + { + ResourceName: "google_dataplex_asset.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_spec.0.name", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataplexAsset_BasicAssetHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "basic_bucket" { + name = "tf-test-bucket%{random_suffix}" + location = "%{region}" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "tf-test-lake%{random_suffix}" + location = "%{region}" + project = "%{project_name}" +} + + 
+resource "google_dataplex_zone" "basic_zone" { + name = "tf-test-zone%{random_suffix}" + location = "%{region}" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + project = "%{project_name}" +} + + +resource "google_dataplex_asset" "primary" { + name = "tf-test-asset%{random_suffix}" + location = "%{region}" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = "projects/%{project_name}/buckets/tf-test-bucket%{random_suffix}" + type = "STORAGE_BUCKET" + } + + labels = { + env = "foo" + my-asset = "exists" + } + + + project = "%{project_name}" + depends_on = [ + google_storage_bucket.basic_bucket + ] +} +`, context) +} + +func testAccDataplexAsset_BasicAssetHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "basic_bucket" { + name = "tf-test-bucket%{random_suffix}" + location = "%{region}" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "tf-test-lake%{random_suffix}" + location = "%{region}" + project = "%{project_name}" +} + + +resource "google_dataplex_zone" "basic_zone" { + name = "tf-test-zone%{random_suffix}" + location = "%{region}" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + project = "%{project_name}" +} + + +resource "google_dataplex_asset" "primary" { + name = "tf-test-asset%{random_suffix}" + location = "%{region}" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = 
"projects/%{project_name}/buckets/tf-test-bucket%{random_suffix}" + type = "STORAGE_BUCKET" + } + + labels = { + env = "foo" + my-asset = "exists" + } + + + project = "%{project_name}" + depends_on = [ + google_storage_bucket.basic_bucket + ] +} +`, context) +} + +func testAccCheckDataplexAssetDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_dataplex_asset" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(rs.Primary.Attributes["dataplex_zone"]), + Lake: dcl.String(rs.Primary.Attributes["lake"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: dataplex.AssetStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := dataplex.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetAsset(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_asset still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml index 10a0f3fbfb9c..dce96244aa71 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml +++ 
b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_dataplex_asset' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataplex.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Asset' diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake.go new file mode 100644 index 000000000000..104ede539247 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake.go @@ -0,0 +1,555 @@ +package dataplex + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataplexLake() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexLakeCreate, + Read: resourceDataplexLakeRead, + Update: resourceDataplexLakeUpdate, + Delete: resourceDataplexLakeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexLakeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: 
tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the lake.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the lake.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "metastore": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Settings to manage lake and Dataproc Metastore service instance association.", + MaxItems: 1, + Elem: DataplexLakeMetastoreSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "asset_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Aggregated status of the underlying assets of the lake.", + Elem: DataplexLakeAssetStatusSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the lake was created.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User-defined labels for the lake.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "metastore_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Metastore status of the lake.", + Elem: DataplexLakeMetastoreStatusSchema(), + }, + + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Service account associated with this lake. This service account must be authorized to access or operate on resources managed by the lake.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the lake. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the lake. This ID will be different if the lake is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the lake was last updated.", + }, + }, + } +} + +func DataplexLakeMetastoreSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. 
A relative reference to the Dataproc Metastore (https://cloud.google.com/dataproc-metastore/docs) service associated with the lake: `projects/{project_id}/locations/{location_id}/services/{service_id}`", + }, + }, + } +} + +func DataplexLakeAssetStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of active assets.", + }, + + "security_policy_applying_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexLakeMetastoreStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "The URI of the endpoint used to access the Metastore service.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current status.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Current state of association. 
Possible values: STATE_UNSPECIFIED, NONE, READY, UPDATING, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the metastore status of the lake.", + }, + }, + } +} + +func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyLake(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Lake: %s", err) + } + + log.Printf("[DEBUG] Finished creating Lake %q: %#v", d.Id(), res) + + return resourceDataplexLakeRead(d, meta) +} + +func resourceDataplexLakeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetLake(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexLake %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + 
return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("metastore", flattenDataplexLakeMetastore(res.Metastore)); err != nil { + return fmt.Errorf("error setting metastore in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("asset_status", flattenDataplexLakeAssetStatus(res.AssetStatus)); err != nil { + return fmt.Errorf("error setting asset_status in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", flattenDataplexLakeLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("metastore_status", flattenDataplexLakeMetastoreStatus(res.MetastoreStatus)); err != nil { + return fmt.Errorf("error setting metastore_status in state: %s", err) + } + if err = d.Set("service_account", res.ServiceAccount); err != nil { + return fmt.Errorf("error setting service_account in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("terraform_labels", flattenDataplexLakeTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + 
return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexLakeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyLake(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Lake: %s", err) + } + + log.Printf("[DEBUG] Finished updating Lake %q: %#v", d.Id(), res) + + return resourceDataplexLakeRead(d, meta) +} + +func resourceDataplexLakeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Lake %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteLake(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Lake: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Lake %q", d.Id()) + return nil +} + +func resourceDataplexLakeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := 
meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/lakes/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexLakeMetastore(o interface{}) *LakeMetastore { + if o == nil { + return EmptyLakeMetastore + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyLakeMetastore + } + obj := objArr[0].(map[string]interface{}) + return &LakeMetastore{ + Service: dcl.String(obj["service"].(string)), + } +} + +func flattenDataplexLakeMetastore(obj *LakeMetastore) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "service": obj.Service, + } + + return []interface{}{transformed} + +} + +func flattenDataplexLakeAssetStatus(obj *LakeAssetStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "active_assets": obj.ActiveAssets, + "security_policy_applying_assets": obj.SecurityPolicyApplyingAssets, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexLakeMetastoreStatus(obj *LakeMetastoreStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "endpoint": obj.Endpoint, + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexLakeLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, 
ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataplexLakeTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go new file mode 100644 index 000000000000..7e4043359826 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go @@ -0,0 +1,127 @@ +package dataplex_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccDataplexLake_BasicLake(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexLakeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexLake_BasicLake(context), + }, + { + ResourceName: "google_dataplex_lake.primary", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccDataplexLake_BasicLakeUpdate0(context), + }, + { + ResourceName: "google_dataplex_lake.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataplexLake_BasicLake(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_lake" "primary" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccDataplexLake_BasicLakeUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_lake" "primary" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Updated description for lake" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccCheckDataplexLakeDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dataplex_lake" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Lake{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + ServiceAccount: 
dcl.StringOrNil(rs.Primary.Attributes["service_account"]), + State: dataplex.LakeStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := dataplex.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetLake(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_lake still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml index ab6d509ab284..977e7aaeb14c 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_dataplex_lake' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataplex.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Lake' diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_sweeper.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_sweeper.go new file mode 100644 index 000000000000..9fd412f4aa82 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_sweeper.go @@ -0,0 +1,53 @@ +package dataplex + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("DataplexLake", testSweepDataplexLake) +} + +func testSweepDataplexLake(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for DataplexLake") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + 
return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLDataplexClient(config, config.UserAgent, "", 0) + err = client.DeleteAllLake(context.Background(), d["project"], d["location"], isDeletableDataplexLake) + if err != nil { + return err + } + return nil +} + +func isDeletableDataplexLake(r *Lake) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone.go new file mode 100644 index 000000000000..4ae8af723d60 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone.go @@ -0,0 +1,731 @@ +package dataplex + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataplexZone() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexZoneCreate, + Read: resourceDataplexZoneRead, + Update: resourceDataplexZoneUpdate, + Delete: resourceDataplexZoneDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexZoneImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + 
Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "discovery_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Specification of the discovery feature applied to data in this zone.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the zone.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Specification of the resources that are referenced by the assets within this zone.", + MaxItems: 1, + Elem: DataplexZoneResourceSpecSchema(), + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. The type of the zone. Possible values: TYPE_UNSPECIFIED, RAW, CURATED", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the zone.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. 
User friendly display name.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "asset_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Aggregated status of the underlying assets of the zone.", + Elem: DataplexZoneAssetStatusSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the zone was created.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User defined labels for the zone.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the zone. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The time when the zone was last updated.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. 
To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexZoneDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexZoneResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. 
Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION", + }, + }, + } +} + +func DataplexZoneAssetStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of active assets.", + }, + + "security_policy_applying_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err 
:= tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetZone(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexZone %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("discovery_spec", flattenDataplexZoneDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexZoneResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("type", res.Type); err != nil { + return fmt.Errorf("error setting type in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("asset_status", flattenDataplexZoneAssetStatus(res.AssetStatus)); err != nil { + return fmt.Errorf("error setting asset_status in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return 
fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", flattenDataplexZoneLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("terraform_labels", flattenDataplexZoneTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexZoneUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + 
if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Zone %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil 
{ + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteZone(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Zone: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Zone %q", d.Id()) + return nil +} + +func resourceDataplexZoneImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexZoneDiscoverySpec(o interface{}) *ZoneDiscoverySpec { + if o == nil { + return EmptyZoneDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyZoneDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return &ZoneDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexZoneDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: tpgdclresource.ExpandStringArray(obj["exclude_patterns"]), + IncludePatterns: tpgdclresource.ExpandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexZoneDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.StringOrNil(obj["schedule"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpec(obj *ZoneDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + 
"csv_options": flattenDataplexZoneDiscoverySpecCsvOptions(obj.CsvOptions), + "exclude_patterns": obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexZoneDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecCsvOptions(o interface{}) *ZoneDiscoverySpecCsvOptions { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ZoneDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexZoneDiscoverySpecCsvOptions(obj *ZoneDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecJsonOptions(o interface{}) *ZoneDiscoverySpecJsonOptions { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ZoneDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpecJsonOptions(obj *ZoneDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + 
+func expandDataplexZoneResourceSpec(o interface{}) *ZoneResourceSpec { + if o == nil { + return EmptyZoneResourceSpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyZoneResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &ZoneResourceSpec{ + LocationType: ZoneResourceSpecLocationTypeEnumRef(obj["location_type"].(string)), + } +} + +func flattenDataplexZoneResourceSpec(obj *ZoneResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location_type": obj.LocationType, + } + + return []interface{}{transformed} + +} + +func flattenDataplexZoneAssetStatus(obj *ZoneAssetStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "active_assets": obj.ActiveAssets, + "security_policy_applying_assets": obj.SecurityPolicyApplyingAssets, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexZoneLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataplexZoneTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go new file mode 100644 index 000000000000..caaf1f4a280f --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go @@ 
-0,0 +1,171 @@ +package dataplex_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccDataplexZone_BasicZone(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexZone_BasicZone(context), + }, + { + ResourceName: "google_dataplex_zone.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccDataplexZone_BasicZoneUpdate0(context), + }, + { + ResourceName: "google_dataplex_zone.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataplexZone_BasicZone(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "%{region}" + name = "tf-test-zone%{random_suffix}" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL" + display_name = "Zone for DCL" + project = "%{project_name}" + labels = {} +} + +resource 
"google_dataplex_lake" "basic" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccDataplexZone_BasicZoneUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "%{region}" + name = "tf-test-zone%{random_suffix}" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL Updated" + display_name = "Zone for DCL" + project = "%{project_name}" + + labels = { + updated_label = "exists" + } +} + +resource "google_dataplex_lake" "basic" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccCheckDataplexZoneDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_dataplex_zone" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Zone{ + Lake: dcl.String(rs.Primary.Attributes["lake"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Type: dataplex.ZoneTypeEnumRef(rs.Primary.Attributes["type"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: 
dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: dataplex.ZoneStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := dataplex.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetZone(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_zone still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml index 08a8b327ecd7..45cc3eda9aa0 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_dataplex_zone' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataplex.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Zone' diff --git a/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl b/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl new file mode 100644 index 000000000000..1b0e81b596af --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl @@ -0,0 +1,739 @@ +package dataplex + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Zone struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + Description *string `json:"description"` + State *ZoneStateEnum `json:"state"` + Type *ZoneTypeEnum `json:"type"` + DiscoverySpec *ZoneDiscoverySpec 
`json:"discoverySpec"` + ResourceSpec *ZoneResourceSpec `json:"resourceSpec"` + AssetStatus *ZoneAssetStatus `json:"assetStatus"` + Project *string `json:"project"` + Location *string `json:"location"` + Lake *string `json:"lake"` +} + +func (r *Zone) String() string { + return dcl.SprintResource(r) +} + +// The enum ZoneStateEnum. +type ZoneStateEnum string + +// ZoneStateEnumRef returns a *ZoneStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ZoneStateEnumRef(s string) *ZoneStateEnum { + v := ZoneStateEnum(s) + return &v +} + +func (v ZoneStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "ACTIVE", "CREATING", "DELETING", "ACTION_REQUIRED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ZoneStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ZoneTypeEnum. +type ZoneTypeEnum string + +// ZoneTypeEnumRef returns a *ZoneTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ZoneTypeEnumRef(s string) *ZoneTypeEnum { + v := ZoneTypeEnum(s) + return &v +} + +func (v ZoneTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TYPE_UNSPECIFIED", "RAW", "CURATED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ZoneTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ZoneResourceSpecLocationTypeEnum. +type ZoneResourceSpecLocationTypeEnum string + +// ZoneResourceSpecLocationTypeEnumRef returns a *ZoneResourceSpecLocationTypeEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func ZoneResourceSpecLocationTypeEnumRef(s string) *ZoneResourceSpecLocationTypeEnum { + v := ZoneResourceSpecLocationTypeEnum(s) + return &v +} + +func (v ZoneResourceSpecLocationTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"LOCATION_TYPE_UNSPECIFIED", "SINGLE_REGION", "MULTI_REGION"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ZoneResourceSpecLocationTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +type ZoneDiscoverySpec struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + IncludePatterns []string `json:"includePatterns"` + ExcludePatterns []string `json:"excludePatterns"` + CsvOptions *ZoneDiscoverySpecCsvOptions `json:"csvOptions"` + JsonOptions *ZoneDiscoverySpecJsonOptions `json:"jsonOptions"` + Schedule *string `json:"schedule"` +} + +type jsonZoneDiscoverySpec ZoneDiscoverySpec + +func (r *ZoneDiscoverySpec) UnmarshalJSON(data []byte) error { + var res jsonZoneDiscoverySpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneDiscoverySpec + } else { + + r.Enabled = res.Enabled + + r.IncludePatterns = res.IncludePatterns + + r.ExcludePatterns = res.ExcludePatterns + + r.CsvOptions = res.CsvOptions + + r.JsonOptions = res.JsonOptions + + r.Schedule = res.Schedule + + } + return nil +} + +// This object is used to assert a desired state where this ZoneDiscoverySpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneDiscoverySpec *ZoneDiscoverySpec = &ZoneDiscoverySpec{empty: true} + +func (r *ZoneDiscoverySpec) Empty() bool { + return r.empty +} + +func (r *ZoneDiscoverySpec) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneDiscoverySpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneDiscoverySpecCsvOptions struct { + empty bool `json:"-"` + HeaderRows *int64 `json:"headerRows"` + Delimiter *string `json:"delimiter"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonZoneDiscoverySpecCsvOptions ZoneDiscoverySpecCsvOptions + +func (r *ZoneDiscoverySpecCsvOptions) UnmarshalJSON(data []byte) error { + var res jsonZoneDiscoverySpecCsvOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneDiscoverySpecCsvOptions + } else { + + r.HeaderRows = res.HeaderRows + + r.Delimiter = res.Delimiter + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this ZoneDiscoverySpecCsvOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneDiscoverySpecCsvOptions *ZoneDiscoverySpecCsvOptions = &ZoneDiscoverySpecCsvOptions{empty: true} + +func (r *ZoneDiscoverySpecCsvOptions) Empty() bool { + return r.empty +} + +func (r *ZoneDiscoverySpecCsvOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneDiscoverySpecCsvOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneDiscoverySpecJsonOptions struct { + empty bool `json:"-"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonZoneDiscoverySpecJsonOptions ZoneDiscoverySpecJsonOptions + +func (r *ZoneDiscoverySpecJsonOptions) UnmarshalJSON(data []byte) error { + var res jsonZoneDiscoverySpecJsonOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneDiscoverySpecJsonOptions + } else { + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this ZoneDiscoverySpecJsonOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneDiscoverySpecJsonOptions *ZoneDiscoverySpecJsonOptions = &ZoneDiscoverySpecJsonOptions{empty: true} + +func (r *ZoneDiscoverySpecJsonOptions) Empty() bool { + return r.empty +} + +func (r *ZoneDiscoverySpecJsonOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneDiscoverySpecJsonOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneResourceSpec struct { + empty bool `json:"-"` + LocationType *ZoneResourceSpecLocationTypeEnum `json:"locationType"` +} + +type jsonZoneResourceSpec ZoneResourceSpec + +func (r *ZoneResourceSpec) UnmarshalJSON(data []byte) error { + var res jsonZoneResourceSpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneResourceSpec + } else { + + r.LocationType = res.LocationType + + } + return nil +} + +// This object is used to assert a desired state where this ZoneResourceSpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneResourceSpec *ZoneResourceSpec = &ZoneResourceSpec{empty: true} + +func (r *ZoneResourceSpec) Empty() bool { + return r.empty +} + +func (r *ZoneResourceSpec) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneResourceSpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneAssetStatus struct { + empty bool `json:"-"` + UpdateTime *string `json:"updateTime"` + ActiveAssets *int64 `json:"activeAssets"` + SecurityPolicyApplyingAssets *int64 `json:"securityPolicyApplyingAssets"` +} + +type jsonZoneAssetStatus ZoneAssetStatus + +func (r *ZoneAssetStatus) UnmarshalJSON(data []byte) error { + var res jsonZoneAssetStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneAssetStatus + } else { + + r.UpdateTime = res.UpdateTime + + r.ActiveAssets = res.ActiveAssets + + r.SecurityPolicyApplyingAssets = res.SecurityPolicyApplyingAssets + + } + return nil +} + +// This object is used to assert a desired state where this ZoneAssetStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneAssetStatus *ZoneAssetStatus = &ZoneAssetStatus{empty: true} + +func (r *ZoneAssetStatus) Empty() bool { + return r.empty +} + +func (r *ZoneAssetStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneAssetStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Zone) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataplex", + Type: "Zone", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "dataplex", +{{- end }} + } +} + +func (r *Zone) ID() (string, error) { + if err := extractZoneFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "description": dcl.ValueOrEmptyString(nr.Description), + "state": dcl.ValueOrEmptyString(nr.State), + "type": dcl.ValueOrEmptyString(nr.Type), + "discovery_spec": dcl.ValueOrEmptyString(nr.DiscoverySpec), + "resource_spec": dcl.ValueOrEmptyString(nr.ResourceSpec), + "asset_status": dcl.ValueOrEmptyString(nr.AssetStatus), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ZoneMaxPage = -1 + +type ZoneList struct { + 
Items []*Zone + + nextToken string + + pageSize int32 + + resource *Zone +} + +func (l *ZoneList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ZoneList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listZone(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListZone(ctx context.Context, project, location, lake string) (*ZoneList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListZoneWithMaxResults(ctx, project, location, lake, ZoneMaxPage) + +} + +func (c *Client) ListZoneWithMaxResults(ctx context.Context, project, location, lake string, pageSize int32) (*ZoneList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Zone{ + Project: &project, + Location: &location, + Lake: &lake, + } + items, token, err := c.listZone(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ZoneList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetZone(ctx context.Context, r *Zone) (*Zone, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractZoneFields(r) + + b, err := c.getZoneRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalZone(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Lake = r.Lake + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeZoneNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractZoneFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteZone(ctx context.Context, r *Zone) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Zone resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Zone...") + deleteOp := deleteZoneOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllZone deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllZone(ctx context.Context, project, location, lake string, filter func(*Zone) bool) error { + listObj, err := c.ListZone(ctx, project, location, lake) + if err != nil { + return err + } + + err = c.deleteAllZone(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllZone(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyZone(ctx context.Context, rawDesired *Zone, opts ...dcl.ApplyOption) (*Zone, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Zone + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyZoneHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyZoneHelper(c *Client, ctx context.Context, rawDesired *Zone, opts ...dcl.ApplyOption) (*Zone, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyZone...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractZoneFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.zoneDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToZoneDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []zoneApiOperation + if create { + ops = append(ops, &createZoneOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyZoneDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyZoneDiff(c *Client, ctx context.Context, desired *Zone, rawDesired *Zone, ops []zoneApiOperation, opts ...dcl.ApplyOption) (*Zone, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetZone(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createZoneOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapZone(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeZoneNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeZoneNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeZoneDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractZoneFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractZoneFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffZone(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Zone) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/zone_internal.go.tmpl b/mmv1/third_party/terraform/services/dataplex/zone_internal.go.tmpl new file mode 100644 index 000000000000..97c9d297a357 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/zone_internal.go.tmpl @@ -0,0 +1,2830 @@ +package dataplex + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Zone) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "type"); err != nil { + return err + } + if err := dcl.Required(r, "discoverySpec"); err != nil { + return err + } + if err := dcl.Required(r, "resourceSpec"); err != nil { + return err 
+ } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Lake, "Lake"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.DiscoverySpec) { + if err := r.DiscoverySpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ResourceSpec) { + if err := r.ResourceSpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AssetStatus) { + if err := r.AssetStatus.validate(); err != nil { + return err + } + } + return nil +} +func (r *ZoneDiscoverySpec) validate() error { + if err := dcl.Required(r, "enabled"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.CsvOptions) { + if err := r.CsvOptions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.JsonOptions) { + if err := r.JsonOptions.validate(); err != nil { + return err + } + } + return nil +} +func (r *ZoneDiscoverySpecCsvOptions) validate() error { + return nil +} +func (r *ZoneDiscoverySpecJsonOptions) validate() error { + return nil +} +func (r *ZoneResourceSpec) validate() error { + if err := dcl.Required(r, "locationType"); err != nil { + return err + } + return nil +} +func (r *ZoneAssetStatus) validate() error { + return nil +} +func (r *Zone) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://dataplex.googleapis.com/v1/", params) +} + +func (r *Zone) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", nr.basePath(), 
userBasePath, params), nil +} + +func (r *Zone) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones", nr.basePath(), userBasePath, params), nil + +} + +func (r *Zone) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones?zoneId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Zone) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Zone) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Zone) SetPolicyVerb() string { + return "" +} + +func (r *Zone) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Zone) 
IAMPolicyVersion() int { + return 3 +} + +// zoneApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type zoneApiOperation interface { + do(context.Context, *Zone, *Client) error +} + +// newUpdateZoneUpdateZoneRequest creates a request for an +// Zone resource's UpdateZone update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateZoneUpdateZoneRequest(ctx context.Context, f *Zone, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["name"] = v + } + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v, err := expandZoneDiscoverySpec(c, f.DiscoverySpec, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["discoverySpec"] = v + } + if v, err := expandZoneAssetStatus(c, f.AssetStatus, res); err != nil { + return nil, fmt.Errorf("error expanding AssetStatus into assetStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["assetStatus"] = v + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s", *f.Project, *f.Location, *f.Lake, *f.Name) + + return req, nil +} + +// marshalUpdateZoneUpdateZoneRequest converts the update into +// the final JSON request body. 
+func marshalUpdateZoneUpdateZoneRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateZoneUpdateZoneOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateZoneUpdateZoneOperation) do(ctx context.Context, r *Zone, c *Client) error { + _, err := c.GetZone(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateZone") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateZoneUpdateZoneRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateZoneUpdateZoneRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listZoneRaw(ctx context.Context, r *Zone, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if 
pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ZoneMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listZoneOperation struct { + Zones []map[string]interface{} `json:"zones"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listZone(ctx context.Context, r *Zone, pageToken string, pageSize int32) ([]*Zone, string, error) { + b, err := c.listZoneRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listZoneOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Zone + for _, v := range m.Zones { + res, err := unmarshalMapZone(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.Lake = r.Lake + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllZone(ctx context.Context, f func(*Zone) bool, resources []*Zone) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteZone(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteZoneOperation struct{} + +func (op *deleteZoneOperation) do(ctx context.Context, r *Zone, c *Client) error { + r, err := c.GetZone(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Zone not found, returning. 
Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetZone checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetZone(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. 
+type createZoneOperation struct { + response map[string]interface{} +} + +func (op *createZoneOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createZoneOperation) do(ctx context.Context, r *Zone, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetZone(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getZoneRaw(ctx context.Context, r *Zone) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) zoneDiffsForRawDesired(ctx context.Context, rawDesired *Zone, opts ...dcl.ApplyOption) (initial, desired *Zone, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user 
provided a state hint. If they did, we will start fetching based on that. + var fetchState *Zone + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Zone); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Zone, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetZone(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Zone resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Zone resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Zone resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeZoneDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Zone: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Zone: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractZoneFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeZoneInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Zone: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeZoneDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Zone: %v", desired) + + // 2.1: Comparison of initial and desired state. 
+ diffs, err = diffZone(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeZoneInitialState(rawInitial, rawDesired *Zone) (*Zone, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeZoneDesiredState(rawDesired, rawInitial *Zone, opts ...dcl.ApplyOption) (*Zone, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.DiscoverySpec = canonicalizeZoneDiscoverySpec(rawDesired.DiscoverySpec, nil, opts...) + rawDesired.ResourceSpec = canonicalizeZoneResourceSpec(rawDesired.ResourceSpec, nil, opts...) + rawDesired.AssetStatus = canonicalizeZoneAssetStatus(rawDesired.AssetStatus, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Zone{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.IsZeroValue(rawDesired.Type) || (dcl.IsEmptyValueIndirect(rawDesired.Type) && dcl.IsEmptyValueIndirect(rawInitial.Type)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Type = rawInitial.Type + } else { + canonicalDesired.Type = rawDesired.Type + } + canonicalDesired.DiscoverySpec = canonicalizeZoneDiscoverySpec(rawDesired.DiscoverySpec, rawInitial.DiscoverySpec, opts...) + canonicalDesired.ResourceSpec = canonicalizeZoneResourceSpec(rawDesired.ResourceSpec, rawInitial.ResourceSpec, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Lake, rawInitial.Lake) { + canonicalDesired.Lake = rawInitial.Lake + } else { + canonicalDesired.Lake = rawDesired.Lake + } + return canonicalDesired, nil +} + +func canonicalizeZoneNewState(c *Client, rawNew, rawDesired *Zone) (*Zone, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, 
rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Type) && dcl.IsEmptyValueIndirect(rawDesired.Type) { + rawNew.Type = rawDesired.Type + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.DiscoverySpec) && dcl.IsEmptyValueIndirect(rawDesired.DiscoverySpec) { + rawNew.DiscoverySpec = rawDesired.DiscoverySpec + } else { + rawNew.DiscoverySpec = canonicalizeNewZoneDiscoverySpec(c, rawDesired.DiscoverySpec, rawNew.DiscoverySpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceSpec) && dcl.IsEmptyValueIndirect(rawDesired.ResourceSpec) { + rawNew.ResourceSpec = rawDesired.ResourceSpec + } else { + rawNew.ResourceSpec = canonicalizeNewZoneResourceSpec(c, rawDesired.ResourceSpec, rawNew.ResourceSpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.AssetStatus) && 
dcl.IsEmptyValueIndirect(rawDesired.AssetStatus) { + rawNew.AssetStatus = rawDesired.AssetStatus + } else { + rawNew.AssetStatus = canonicalizeNewZoneAssetStatus(c, rawDesired.AssetStatus, rawNew.AssetStatus) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Lake = rawDesired.Lake + + return rawNew, nil +} + +func canonicalizeZoneDiscoverySpec(des, initial *ZoneDiscoverySpec, opts ...dcl.ApplyOption) *ZoneDiscoverySpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneDiscoverySpec{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, initial.IncludePatterns) { + cDes.IncludePatterns = initial.IncludePatterns + } else { + cDes.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, initial.ExcludePatterns) { + cDes.ExcludePatterns = initial.ExcludePatterns + } else { + cDes.ExcludePatterns = des.ExcludePatterns + } + cDes.CsvOptions = canonicalizeZoneDiscoverySpecCsvOptions(des.CsvOptions, initial.CsvOptions, opts...) + cDes.JsonOptions = canonicalizeZoneDiscoverySpecJsonOptions(des.JsonOptions, initial.JsonOptions, opts...) + if dcl.StringCanonicalize(des.Schedule, initial.Schedule) || dcl.IsZeroValue(des.Schedule) { + cDes.Schedule = initial.Schedule + } else { + cDes.Schedule = des.Schedule + } + + return cDes +} + +func canonicalizeZoneDiscoverySpecSlice(des, initial []ZoneDiscoverySpec, opts ...dcl.ApplyOption) []ZoneDiscoverySpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneDiscoverySpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneDiscoverySpec(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneDiscoverySpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneDiscoverySpec(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneDiscoverySpec(c *Client, des, nw *ZoneDiscoverySpec) *ZoneDiscoverySpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneDiscoverySpec while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, nw.IncludePatterns) { + nw.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, nw.ExcludePatterns) { + nw.ExcludePatterns = des.ExcludePatterns + } + nw.CsvOptions = canonicalizeNewZoneDiscoverySpecCsvOptions(c, des.CsvOptions, nw.CsvOptions) + nw.JsonOptions = canonicalizeNewZoneDiscoverySpecJsonOptions(c, des.JsonOptions, nw.JsonOptions) + if dcl.StringCanonicalize(des.Schedule, nw.Schedule) { + nw.Schedule = des.Schedule + } + + return nw +} + +func canonicalizeNewZoneDiscoverySpecSet(c *Client, des, nw []ZoneDiscoverySpec) []ZoneDiscoverySpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneDiscoverySpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneDiscoverySpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneDiscoverySpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneDiscoverySpecSlice(c *Client, des, nw []ZoneDiscoverySpec) []ZoneDiscoverySpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ZoneDiscoverySpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneDiscoverySpec(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneDiscoverySpecCsvOptions(des, initial *ZoneDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) *ZoneDiscoverySpecCsvOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneDiscoverySpecCsvOptions{} + + if dcl.IsZeroValue(des.HeaderRows) || (dcl.IsEmptyValueIndirect(des.HeaderRows) && dcl.IsEmptyValueIndirect(initial.HeaderRows)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.HeaderRows = initial.HeaderRows + } else { + cDes.HeaderRows = des.HeaderRows + } + if dcl.StringCanonicalize(des.Delimiter, initial.Delimiter) || dcl.IsZeroValue(des.Delimiter) { + cDes.Delimiter = initial.Delimiter + } else { + cDes.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeZoneDiscoverySpecCsvOptionsSlice(des, initial []ZoneDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) []ZoneDiscoverySpecCsvOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneDiscoverySpecCsvOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneDiscoverySpecCsvOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneDiscoverySpecCsvOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneDiscoverySpecCsvOptions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneDiscoverySpecCsvOptions(c *Client, des, nw *ZoneDiscoverySpecCsvOptions) *ZoneDiscoverySpecCsvOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneDiscoverySpecCsvOptions while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Delimiter, nw.Delimiter) { + nw.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewZoneDiscoverySpecCsvOptionsSet(c *Client, des, nw []ZoneDiscoverySpecCsvOptions) []ZoneDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneDiscoverySpecCsvOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneDiscoverySpecCsvOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneDiscoverySpecCsvOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneDiscoverySpecCsvOptionsSlice(c *Client, des, nw []ZoneDiscoverySpecCsvOptions) []ZoneDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ZoneDiscoverySpecCsvOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneDiscoverySpecCsvOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneDiscoverySpecJsonOptions(des, initial *ZoneDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) *ZoneDiscoverySpecJsonOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneDiscoverySpecJsonOptions{} + + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeZoneDiscoverySpecJsonOptionsSlice(des, initial []ZoneDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) []ZoneDiscoverySpecJsonOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneDiscoverySpecJsonOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneDiscoverySpecJsonOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneDiscoverySpecJsonOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneDiscoverySpecJsonOptions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneDiscoverySpecJsonOptions(c *Client, des, nw *ZoneDiscoverySpecJsonOptions) *ZoneDiscoverySpecJsonOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneDiscoverySpecJsonOptions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewZoneDiscoverySpecJsonOptionsSet(c *Client, des, nw []ZoneDiscoverySpecJsonOptions) []ZoneDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneDiscoverySpecJsonOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneDiscoverySpecJsonOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneDiscoverySpecJsonOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneDiscoverySpecJsonOptionsSlice(c *Client, des, nw []ZoneDiscoverySpecJsonOptions) []ZoneDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ZoneDiscoverySpecJsonOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneDiscoverySpecJsonOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneResourceSpec(des, initial *ZoneResourceSpec, opts ...dcl.ApplyOption) *ZoneResourceSpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneResourceSpec{} + + if dcl.IsZeroValue(des.LocationType) || (dcl.IsEmptyValueIndirect(des.LocationType) && dcl.IsEmptyValueIndirect(initial.LocationType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.LocationType = initial.LocationType + } else { + cDes.LocationType = des.LocationType + } + + return cDes +} + +func canonicalizeZoneResourceSpecSlice(des, initial []ZoneResourceSpec, opts ...dcl.ApplyOption) []ZoneResourceSpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneResourceSpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneResourceSpec(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneResourceSpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneResourceSpec(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneResourceSpec(c *Client, des, nw *ZoneResourceSpec) *ZoneResourceSpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneResourceSpec while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewZoneResourceSpecSet(c *Client, des, nw []ZoneResourceSpec) []ZoneResourceSpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneResourceSpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneResourceSpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneResourceSpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneResourceSpecSlice(c *Client, des, nw []ZoneResourceSpec) []ZoneResourceSpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ZoneResourceSpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneResourceSpec(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneAssetStatus(des, initial *ZoneAssetStatus, opts ...dcl.ApplyOption) *ZoneAssetStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneAssetStatus{} + + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + if dcl.IsZeroValue(des.ActiveAssets) || (dcl.IsEmptyValueIndirect(des.ActiveAssets) && dcl.IsEmptyValueIndirect(initial.ActiveAssets)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ActiveAssets = initial.ActiveAssets + } else { + cDes.ActiveAssets = des.ActiveAssets + } + if dcl.IsZeroValue(des.SecurityPolicyApplyingAssets) || (dcl.IsEmptyValueIndirect(des.SecurityPolicyApplyingAssets) && dcl.IsEmptyValueIndirect(initial.SecurityPolicyApplyingAssets)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SecurityPolicyApplyingAssets = initial.SecurityPolicyApplyingAssets + } else { + cDes.SecurityPolicyApplyingAssets = des.SecurityPolicyApplyingAssets + } + + return cDes +} + +func canonicalizeZoneAssetStatusSlice(des, initial []ZoneAssetStatus, opts ...dcl.ApplyOption) []ZoneAssetStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneAssetStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneAssetStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneAssetStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneAssetStatus(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneAssetStatus(c *Client, des, nw *ZoneAssetStatus) *ZoneAssetStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneAssetStatus while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewZoneAssetStatusSet(c *Client, des, nw []ZoneAssetStatus) []ZoneAssetStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneAssetStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneAssetStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneAssetStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneAssetStatusSlice(c *Client, des, nw []ZoneAssetStatus) []ZoneAssetStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ZoneAssetStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneAssetStatus(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffZone(c *Client, desired, actual *Zone, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiscoverySpec, actual.DiscoverySpec, dcl.DiffInfo{ObjectFunction: compareZoneDiscoverySpecNewStyle, EmptyObject: EmptyZoneDiscoverySpec, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DiscoverySpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ResourceSpec, actual.ResourceSpec, dcl.DiffInfo{ObjectFunction: compareZoneResourceSpecNewStyle, EmptyObject: EmptyZoneResourceSpec, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceSpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AssetStatus, actual.AssetStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareZoneAssetStatusNewStyle, EmptyObject: EmptyZoneAssetStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AssetStatus")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Lake, actual.Lake, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Lake")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareZoneDiscoverySpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneDiscoverySpec) + if !ok { + desiredNotPointer, ok := d.(ZoneDiscoverySpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpec or *ZoneDiscoverySpec", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneDiscoverySpec) + if !ok { + actualNotPointer, ok := a.(ZoneDiscoverySpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpec", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IncludePatterns, actual.IncludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("IncludePatterns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExcludePatterns, actual.ExcludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("ExcludePatterns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CsvOptions, actual.CsvOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareZoneDiscoverySpecCsvOptionsNewStyle, EmptyObject: EmptyZoneDiscoverySpecCsvOptions, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("CsvOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.JsonOptions, actual.JsonOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareZoneDiscoverySpecJsonOptionsNewStyle, EmptyObject: EmptyZoneDiscoverySpecJsonOptions, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("JsonOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Schedule, actual.Schedule, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Schedule")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareZoneDiscoverySpecCsvOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneDiscoverySpecCsvOptions) + if !ok { + desiredNotPointer, ok := d.(ZoneDiscoverySpecCsvOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecCsvOptions or *ZoneDiscoverySpecCsvOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneDiscoverySpecCsvOptions) + if !ok { + actualNotPointer, ok := a.(ZoneDiscoverySpecCsvOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecCsvOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HeaderRows, actual.HeaderRows, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("HeaderRows")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Delimiter, actual.Delimiter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Delimiter")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareZoneDiscoverySpecJsonOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneDiscoverySpecJsonOptions) + if !ok { + desiredNotPointer, ok := d.(ZoneDiscoverySpecJsonOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecJsonOptions or *ZoneDiscoverySpecJsonOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneDiscoverySpecJsonOptions) + if !ok { + actualNotPointer, ok := a.(ZoneDiscoverySpecJsonOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecJsonOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareZoneResourceSpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneResourceSpec) + if !ok { + desiredNotPointer, ok := d.(ZoneResourceSpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneResourceSpec or *ZoneResourceSpec", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneResourceSpec) + if !ok { + actualNotPointer, ok := a.(ZoneResourceSpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneResourceSpec", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.LocationType, actual.LocationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocationType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareZoneAssetStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneAssetStatus) + if !ok { + desiredNotPointer, ok := d.(ZoneAssetStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneAssetStatus or *ZoneAssetStatus", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneAssetStatus) + if !ok { + actualNotPointer, ok := a.(ZoneAssetStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneAssetStatus", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ActiveAssets, actual.ActiveAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("ActiveAssets")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecurityPolicyApplyingAssets, actual.SecurityPolicyApplyingAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("SecurityPolicyApplyingAssets")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Zone) urlNormalized() *Zone { + normalized := dcl.Copy(*r).(Zone) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Lake = dcl.SelfLinkToName(r.Lake) + return &normalized +} + +func (r *Zone) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateZone" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the 
Zone resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Zone) marshal(c *Client) ([]byte, error) { + m, err := expandZone(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Zone: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalZone decodes JSON responses into the Zone resource schema. +func unmarshalZone(b []byte, c *Client, res *Zone) (*Zone, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapZone(m, c, res) +} + +func unmarshalMapZone(m map[string]interface{}, c *Client, res *Zone) (*Zone, error) { + + flattened := flattenZone(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandZone expands Zone into a JSON request object. +func expandZone(c *Client, f *Zone) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.Type; dcl.ValueShouldBeSent(v) { + m["type"] = v + } + if v, err := expandZoneDiscoverySpec(c, f.DiscoverySpec, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["discoverySpec"] = v + } + if v, err := expandZoneResourceSpec(c, f.ResourceSpec, res); err 
!= nil { + return nil, fmt.Errorf("error expanding ResourceSpec into resourceSpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["resourceSpec"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Lake into lake: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["lake"] = v + } + + return m, nil +} + +// flattenZone flattens Zone from a JSON request object into the +// Zone type. +func flattenZone(c *Client, i interface{}, res *Zone) *Zone { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Zone{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.State = flattenZoneStateEnum(m["state"]) + resultRes.Type = flattenZoneTypeEnum(m["type"]) + resultRes.DiscoverySpec = flattenZoneDiscoverySpec(c, m["discoverySpec"], res) + resultRes.ResourceSpec = flattenZoneResourceSpec(c, m["resourceSpec"], res) + resultRes.AssetStatus = flattenZoneAssetStatus(c, m["assetStatus"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Lake = dcl.FlattenString(m["lake"]) + + return resultRes +} + +// expandZoneDiscoverySpecMap expands the contents of 
ZoneDiscoverySpec into a JSON +// request object. +func expandZoneDiscoverySpecMap(c *Client, f map[string]ZoneDiscoverySpec, res *Zone) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandZoneDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandZoneDiscoverySpecSlice expands the contents of ZoneDiscoverySpec into a JSON +// request object. +func expandZoneDiscoverySpecSlice(c *Client, f []ZoneDiscoverySpec, res *Zone) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandZoneDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenZoneDiscoverySpecMap flattens the contents of ZoneDiscoverySpec from a JSON +// response object. +func flattenZoneDiscoverySpecMap(c *Client, i interface{}, res *Zone) map[string]ZoneDiscoverySpec { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneDiscoverySpec{} + } + + if len(a) == 0 { + return map[string]ZoneDiscoverySpec{} + } + + items := make(map[string]ZoneDiscoverySpec) + for k, item := range a { + items[k] = *flattenZoneDiscoverySpec(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenZoneDiscoverySpecSlice flattens the contents of ZoneDiscoverySpec from a JSON +// response object. 
+func flattenZoneDiscoverySpecSlice(c *Client, i interface{}, res *Zone) []ZoneDiscoverySpec { + a, ok := i.([]interface{}) + if !ok { + return []ZoneDiscoverySpec{} + } + + if len(a) == 0 { + return []ZoneDiscoverySpec{} + } + + items := make([]ZoneDiscoverySpec, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneDiscoverySpec(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandZoneDiscoverySpec expands an instance of ZoneDiscoverySpec into a JSON +// request object. +func expandZoneDiscoverySpec(c *Client, f *ZoneDiscoverySpec, res *Zone) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.IncludePatterns; v != nil { + m["includePatterns"] = v + } + if v := f.ExcludePatterns; v != nil { + m["excludePatterns"] = v + } + if v, err := expandZoneDiscoverySpecCsvOptions(c, f.CsvOptions, res); err != nil { + return nil, fmt.Errorf("error expanding CsvOptions into csvOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["csvOptions"] = v + } + if v, err := expandZoneDiscoverySpecJsonOptions(c, f.JsonOptions, res); err != nil { + return nil, fmt.Errorf("error expanding JsonOptions into jsonOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["jsonOptions"] = v + } + if v := f.Schedule; !dcl.IsEmptyValueIndirect(v) { + m["schedule"] = v + } + + return m, nil +} + +// flattenZoneDiscoverySpec flattens an instance of ZoneDiscoverySpec from a JSON +// response object. 
+func flattenZoneDiscoverySpec(c *Client, i interface{}, res *Zone) *ZoneDiscoverySpec { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ZoneDiscoverySpec{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyZoneDiscoverySpec + } + r.Enabled = flattenZoneDiscoverySpecEnable(c, m["enabled"], res) + r.IncludePatterns = dcl.FlattenStringSlice(m["includePatterns"]) + r.ExcludePatterns = dcl.FlattenStringSlice(m["excludePatterns"]) + r.CsvOptions = flattenZoneDiscoverySpecCsvOptions(c, m["csvOptions"], res) + r.JsonOptions = flattenZoneDiscoverySpecJsonOptions(c, m["jsonOptions"], res) + r.Schedule = dcl.FlattenString(m["schedule"]) + + return r +} + +// expandZoneDiscoverySpecCsvOptionsMap expands the contents of ZoneDiscoverySpecCsvOptions into a JSON +// request object. +func expandZoneDiscoverySpecCsvOptionsMap(c *Client, f map[string]ZoneDiscoverySpecCsvOptions, res *Zone) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandZoneDiscoverySpecCsvOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandZoneDiscoverySpecCsvOptionsSlice expands the contents of ZoneDiscoverySpecCsvOptions into a JSON +// request object. +func expandZoneDiscoverySpecCsvOptionsSlice(c *Client, f []ZoneDiscoverySpecCsvOptions, res *Zone) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandZoneDiscoverySpecCsvOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenZoneDiscoverySpecCsvOptionsMap flattens the contents of ZoneDiscoverySpecCsvOptions from a JSON +// response object. 
+func flattenZoneDiscoverySpecCsvOptionsMap(c *Client, i interface{}, res *Zone) map[string]ZoneDiscoverySpecCsvOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneDiscoverySpecCsvOptions{} + } + + if len(a) == 0 { + return map[string]ZoneDiscoverySpecCsvOptions{} + } + + items := make(map[string]ZoneDiscoverySpecCsvOptions) + for k, item := range a { + items[k] = *flattenZoneDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenZoneDiscoverySpecCsvOptionsSlice flattens the contents of ZoneDiscoverySpecCsvOptions from a JSON +// response object. +func flattenZoneDiscoverySpecCsvOptionsSlice(c *Client, i interface{}, res *Zone) []ZoneDiscoverySpecCsvOptions { + a, ok := i.([]interface{}) + if !ok { + return []ZoneDiscoverySpecCsvOptions{} + } + + if len(a) == 0 { + return []ZoneDiscoverySpecCsvOptions{} + } + + items := make([]ZoneDiscoverySpecCsvOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandZoneDiscoverySpecCsvOptions expands an instance of ZoneDiscoverySpecCsvOptions into a JSON +// request object. +func expandZoneDiscoverySpecCsvOptions(c *Client, f *ZoneDiscoverySpecCsvOptions, res *Zone) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HeaderRows; !dcl.IsEmptyValueIndirect(v) { + m["headerRows"] = v + } + if v := f.Delimiter; !dcl.IsEmptyValueIndirect(v) { + m["delimiter"] = v + } + if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) { + m["encoding"] = v + } + if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) { + m["disableTypeInference"] = v + } + + return m, nil +} + +// flattenZoneDiscoverySpecCsvOptions flattens an instance of ZoneDiscoverySpecCsvOptions from a JSON +// response object. 
+func flattenZoneDiscoverySpecCsvOptions(c *Client, i interface{}, res *Zone) *ZoneDiscoverySpecCsvOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ZoneDiscoverySpecCsvOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyZoneDiscoverySpecCsvOptions + } + r.HeaderRows = dcl.FlattenInteger(m["headerRows"]) + r.Delimiter = dcl.FlattenString(m["delimiter"]) + r.Encoding = dcl.FlattenString(m["encoding"]) + r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"]) + + return r +} + +// expandZoneDiscoverySpecJsonOptionsMap expands the contents of ZoneDiscoverySpecJsonOptions into a JSON +// request object. +func expandZoneDiscoverySpecJsonOptionsMap(c *Client, f map[string]ZoneDiscoverySpecJsonOptions, res *Zone) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandZoneDiscoverySpecJsonOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandZoneDiscoverySpecJsonOptionsSlice expands the contents of ZoneDiscoverySpecJsonOptions into a JSON +// request object. +func expandZoneDiscoverySpecJsonOptionsSlice(c *Client, f []ZoneDiscoverySpecJsonOptions, res *Zone) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandZoneDiscoverySpecJsonOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenZoneDiscoverySpecJsonOptionsMap flattens the contents of ZoneDiscoverySpecJsonOptions from a JSON +// response object. 
+func flattenZoneDiscoverySpecJsonOptionsMap(c *Client, i interface{}, res *Zone) map[string]ZoneDiscoverySpecJsonOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneDiscoverySpecJsonOptions{} + } + + if len(a) == 0 { + return map[string]ZoneDiscoverySpecJsonOptions{} + } + + items := make(map[string]ZoneDiscoverySpecJsonOptions) + for k, item := range a { + items[k] = *flattenZoneDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenZoneDiscoverySpecJsonOptionsSlice flattens the contents of ZoneDiscoverySpecJsonOptions from a JSON +// response object. +func flattenZoneDiscoverySpecJsonOptionsSlice(c *Client, i interface{}, res *Zone) []ZoneDiscoverySpecJsonOptions { + a, ok := i.([]interface{}) + if !ok { + return []ZoneDiscoverySpecJsonOptions{} + } + + if len(a) == 0 { + return []ZoneDiscoverySpecJsonOptions{} + } + + items := make([]ZoneDiscoverySpecJsonOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandZoneDiscoverySpecJsonOptions expands an instance of ZoneDiscoverySpecJsonOptions into a JSON +// request object. +func expandZoneDiscoverySpecJsonOptions(c *Client, f *ZoneDiscoverySpecJsonOptions, res *Zone) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) { + m["encoding"] = v + } + if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) { + m["disableTypeInference"] = v + } + + return m, nil +} + +// flattenZoneDiscoverySpecJsonOptions flattens an instance of ZoneDiscoverySpecJsonOptions from a JSON +// response object. 
+func flattenZoneDiscoverySpecJsonOptions(c *Client, i interface{}, res *Zone) *ZoneDiscoverySpecJsonOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ZoneDiscoverySpecJsonOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyZoneDiscoverySpecJsonOptions + } + r.Encoding = dcl.FlattenString(m["encoding"]) + r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"]) + + return r +} + +// expandZoneResourceSpecMap expands the contents of ZoneResourceSpec into a JSON +// request object. +func expandZoneResourceSpecMap(c *Client, f map[string]ZoneResourceSpec, res *Zone) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandZoneResourceSpec(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandZoneResourceSpecSlice expands the contents of ZoneResourceSpec into a JSON +// request object. +func expandZoneResourceSpecSlice(c *Client, f []ZoneResourceSpec, res *Zone) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandZoneResourceSpec(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenZoneResourceSpecMap flattens the contents of ZoneResourceSpec from a JSON +// response object. 
+func flattenZoneResourceSpecMap(c *Client, i interface{}, res *Zone) map[string]ZoneResourceSpec { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneResourceSpec{} + } + + if len(a) == 0 { + return map[string]ZoneResourceSpec{} + } + + items := make(map[string]ZoneResourceSpec) + for k, item := range a { + items[k] = *flattenZoneResourceSpec(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenZoneResourceSpecSlice flattens the contents of ZoneResourceSpec from a JSON +// response object. +func flattenZoneResourceSpecSlice(c *Client, i interface{}, res *Zone) []ZoneResourceSpec { + a, ok := i.([]interface{}) + if !ok { + return []ZoneResourceSpec{} + } + + if len(a) == 0 { + return []ZoneResourceSpec{} + } + + items := make([]ZoneResourceSpec, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneResourceSpec(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandZoneResourceSpec expands an instance of ZoneResourceSpec into a JSON +// request object. +func expandZoneResourceSpec(c *Client, f *ZoneResourceSpec, res *Zone) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.LocationType; !dcl.IsEmptyValueIndirect(v) { + m["locationType"] = v + } + + return m, nil +} + +// flattenZoneResourceSpec flattens an instance of ZoneResourceSpec from a JSON +// response object. +func flattenZoneResourceSpec(c *Client, i interface{}, res *Zone) *ZoneResourceSpec { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ZoneResourceSpec{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyZoneResourceSpec + } + r.LocationType = flattenZoneResourceSpecLocationTypeEnum(m["locationType"]) + + return r +} + +// expandZoneAssetStatusMap expands the contents of ZoneAssetStatus into a JSON +// request object. 
+func expandZoneAssetStatusMap(c *Client, f map[string]ZoneAssetStatus, res *Zone) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandZoneAssetStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandZoneAssetStatusSlice expands the contents of ZoneAssetStatus into a JSON +// request object. +func expandZoneAssetStatusSlice(c *Client, f []ZoneAssetStatus, res *Zone) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandZoneAssetStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenZoneAssetStatusMap flattens the contents of ZoneAssetStatus from a JSON +// response object. +func flattenZoneAssetStatusMap(c *Client, i interface{}, res *Zone) map[string]ZoneAssetStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneAssetStatus{} + } + + if len(a) == 0 { + return map[string]ZoneAssetStatus{} + } + + items := make(map[string]ZoneAssetStatus) + for k, item := range a { + items[k] = *flattenZoneAssetStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenZoneAssetStatusSlice flattens the contents of ZoneAssetStatus from a JSON +// response object. +func flattenZoneAssetStatusSlice(c *Client, i interface{}, res *Zone) []ZoneAssetStatus { + a, ok := i.([]interface{}) + if !ok { + return []ZoneAssetStatus{} + } + + if len(a) == 0 { + return []ZoneAssetStatus{} + } + + items := make([]ZoneAssetStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneAssetStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandZoneAssetStatus expands an instance of ZoneAssetStatus into a JSON +// request object. 
+func expandZoneAssetStatus(c *Client, f *ZoneAssetStatus, res *Zone) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + if v := f.ActiveAssets; !dcl.IsEmptyValueIndirect(v) { + m["activeAssets"] = v + } + if v := f.SecurityPolicyApplyingAssets; !dcl.IsEmptyValueIndirect(v) { + m["securityPolicyApplyingAssets"] = v + } + + return m, nil +} + +// flattenZoneAssetStatus flattens an instance of ZoneAssetStatus from a JSON +// response object. +func flattenZoneAssetStatus(c *Client, i interface{}, res *Zone) *ZoneAssetStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ZoneAssetStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyZoneAssetStatus + } + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + r.ActiveAssets = dcl.FlattenInteger(m["activeAssets"]) + r.SecurityPolicyApplyingAssets = dcl.FlattenInteger(m["securityPolicyApplyingAssets"]) + + return r +} + +// flattenZoneStateEnumMap flattens the contents of ZoneStateEnum from a JSON +// response object. +func flattenZoneStateEnumMap(c *Client, i interface{}, res *Zone) map[string]ZoneStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneStateEnum{} + } + + if len(a) == 0 { + return map[string]ZoneStateEnum{} + } + + items := make(map[string]ZoneStateEnum) + for k, item := range a { + items[k] = *flattenZoneStateEnum(item.(interface{})) + } + + return items +} + +// flattenZoneStateEnumSlice flattens the contents of ZoneStateEnum from a JSON +// response object. 
+func flattenZoneStateEnumSlice(c *Client, i interface{}, res *Zone) []ZoneStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []ZoneStateEnum{} + } + + if len(a) == 0 { + return []ZoneStateEnum{} + } + + items := make([]ZoneStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneStateEnum(item.(interface{}))) + } + + return items +} + +// flattenZoneStateEnum asserts that an interface is a string, and returns a +// pointer to a *ZoneStateEnum with the same value as that string. +func flattenZoneStateEnum(i interface{}) *ZoneStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ZoneStateEnumRef(s) +} + +// flattenZoneTypeEnumMap flattens the contents of ZoneTypeEnum from a JSON +// response object. +func flattenZoneTypeEnumMap(c *Client, i interface{}, res *Zone) map[string]ZoneTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneTypeEnum{} + } + + if len(a) == 0 { + return map[string]ZoneTypeEnum{} + } + + items := make(map[string]ZoneTypeEnum) + for k, item := range a { + items[k] = *flattenZoneTypeEnum(item.(interface{})) + } + + return items +} + +// flattenZoneTypeEnumSlice flattens the contents of ZoneTypeEnum from a JSON +// response object. +func flattenZoneTypeEnumSlice(c *Client, i interface{}, res *Zone) []ZoneTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []ZoneTypeEnum{} + } + + if len(a) == 0 { + return []ZoneTypeEnum{} + } + + items := make([]ZoneTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenZoneTypeEnum asserts that an interface is a string, and returns a +// pointer to a *ZoneTypeEnum with the same value as that string. 
+func flattenZoneTypeEnum(i interface{}) *ZoneTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ZoneTypeEnumRef(s) +} + +// flattenZoneResourceSpecLocationTypeEnumMap flattens the contents of ZoneResourceSpecLocationTypeEnum from a JSON +// response object. +func flattenZoneResourceSpecLocationTypeEnumMap(c *Client, i interface{}, res *Zone) map[string]ZoneResourceSpecLocationTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneResourceSpecLocationTypeEnum{} + } + + if len(a) == 0 { + return map[string]ZoneResourceSpecLocationTypeEnum{} + } + + items := make(map[string]ZoneResourceSpecLocationTypeEnum) + for k, item := range a { + items[k] = *flattenZoneResourceSpecLocationTypeEnum(item.(interface{})) + } + + return items +} + +// flattenZoneResourceSpecLocationTypeEnumSlice flattens the contents of ZoneResourceSpecLocationTypeEnum from a JSON +// response object. +func flattenZoneResourceSpecLocationTypeEnumSlice(c *Client, i interface{}, res *Zone) []ZoneResourceSpecLocationTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []ZoneResourceSpecLocationTypeEnum{} + } + + if len(a) == 0 { + return []ZoneResourceSpecLocationTypeEnum{} + } + + items := make([]ZoneResourceSpecLocationTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenZoneResourceSpecLocationTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenZoneResourceSpecLocationTypeEnum asserts that an interface is a string, and returns a +// pointer to a *ZoneResourceSpecLocationTypeEnum with the same value as that string. 
+func flattenZoneResourceSpecLocationTypeEnum(i interface{}) *ZoneResourceSpecLocationTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ZoneResourceSpecLocationTypeEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Zone) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalZone(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Lake == nil && ncr.Lake == nil { + c.Config.Logger.Info("Both Lake fields null - considering equal.") + } else if nr.Lake == nil || ncr.Lake == nil { + c.Config.Logger.Info("Only one Lake field is null - considering unequal.") + return false + } else if *nr.Lake != *ncr.Lake { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is 
null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type zoneDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp zoneApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToZoneDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]zoneDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []zoneDiff + // For each operation name, create a zoneDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := zoneDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToZoneApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToZoneApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (zoneApiOperation, error) { + switch opName { + + case "updateZoneUpdateZoneOperation": + return &updateZoneUpdateZoneOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractZoneFields(r *Zone) error { + vDiscoverySpec := r.DiscoverySpec + if vDiscoverySpec == nil { + // note: explicitly not the empty object. 
+ vDiscoverySpec = &ZoneDiscoverySpec{} + } + if err := extractZoneDiscoverySpecFields(r, vDiscoverySpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoverySpec) { + r.DiscoverySpec = vDiscoverySpec + } + vResourceSpec := r.ResourceSpec + if vResourceSpec == nil { + // note: explicitly not the empty object. + vResourceSpec = &ZoneResourceSpec{} + } + if err := extractZoneResourceSpecFields(r, vResourceSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceSpec) { + r.ResourceSpec = vResourceSpec + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. + vAssetStatus = &ZoneAssetStatus{} + } + if err := extractZoneAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + return nil +} +func extractZoneDiscoverySpecFields(r *Zone, o *ZoneDiscoverySpec) error { + vCsvOptions := o.CsvOptions + if vCsvOptions == nil { + // note: explicitly not the empty object. + vCsvOptions = &ZoneDiscoverySpecCsvOptions{} + } + if err := extractZoneDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCsvOptions) { + o.CsvOptions = vCsvOptions + } + vJsonOptions := o.JsonOptions + if vJsonOptions == nil { + // note: explicitly not the empty object. 
+ vJsonOptions = &ZoneDiscoverySpecJsonOptions{} + } + if err := extractZoneDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vJsonOptions) { + o.JsonOptions = vJsonOptions + } + return nil +} +func extractZoneDiscoverySpecCsvOptionsFields(r *Zone, o *ZoneDiscoverySpecCsvOptions) error { + return nil +} +func extractZoneDiscoverySpecJsonOptionsFields(r *Zone, o *ZoneDiscoverySpecJsonOptions) error { + return nil +} +func extractZoneResourceSpecFields(r *Zone, o *ZoneResourceSpec) error { + return nil +} +func extractZoneAssetStatusFields(r *Zone, o *ZoneAssetStatus) error { + return nil +} + +func postReadExtractZoneFields(r *Zone) error { + vDiscoverySpec := r.DiscoverySpec + if vDiscoverySpec == nil { + // note: explicitly not the empty object. + vDiscoverySpec = &ZoneDiscoverySpec{} + } + if err := postReadExtractZoneDiscoverySpecFields(r, vDiscoverySpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoverySpec) { + r.DiscoverySpec = vDiscoverySpec + } + vResourceSpec := r.ResourceSpec + if vResourceSpec == nil { + // note: explicitly not the empty object. + vResourceSpec = &ZoneResourceSpec{} + } + if err := postReadExtractZoneResourceSpecFields(r, vResourceSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceSpec) { + r.ResourceSpec = vResourceSpec + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. + vAssetStatus = &ZoneAssetStatus{} + } + if err := postReadExtractZoneAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + return nil +} +func postReadExtractZoneDiscoverySpecFields(r *Zone, o *ZoneDiscoverySpec) error { + vCsvOptions := o.CsvOptions + if vCsvOptions == nil { + // note: explicitly not the empty object. 
+ vCsvOptions = &ZoneDiscoverySpecCsvOptions{} + } + if err := extractZoneDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCsvOptions) { + o.CsvOptions = vCsvOptions + } + vJsonOptions := o.JsonOptions + if vJsonOptions == nil { + // note: explicitly not the empty object. + vJsonOptions = &ZoneDiscoverySpecJsonOptions{} + } + if err := extractZoneDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vJsonOptions) { + o.JsonOptions = vJsonOptions + } + return nil +} +func postReadExtractZoneDiscoverySpecCsvOptionsFields(r *Zone, o *ZoneDiscoverySpecCsvOptions) error { + return nil +} +func postReadExtractZoneDiscoverySpecJsonOptionsFields(r *Zone, o *ZoneDiscoverySpecJsonOptions) error { + return nil +} +func postReadExtractZoneResourceSpecFields(r *Zone, o *ZoneResourceSpec) error { + return nil +} +func postReadExtractZoneAssetStatusFields(r *Zone, o *ZoneAssetStatus) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/client.go b/mmv1/third_party/terraform/services/dataproc/client.go new file mode 100644 index 000000000000..006c4fa2094c --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/client.go @@ -0,0 +1,18 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/dataproc/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/dataproc/provider_dcl_client_creation.go new file mode 100644 index 000000000000..f75e4dc8e066 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLDataprocClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.DataprocBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl new file mode 100644 index 000000000000..2ca6fad78138 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl @@ -0,0 +1,4377 @@ +package dataproc + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataprocWorkflowTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocWorkflowTemplateCreate, + Read: resourceDataprocWorkflowTemplateRead, + Update: resourceDataprocWorkflowTemplateUpdate, + Delete: resourceDataprocWorkflowTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocWorkflowTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceDataprocWorkflowTemplateResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceDataprocWorkflowTemplateUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "jobs": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
The Directed Acyclic Graph of Jobs to submit.", + Elem: DataprocWorkflowTemplateJobsSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` * For `projects.locations.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`", + }, + + "placement": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. WorkflowTemplate scheduling information.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementSchema(), + }, + + "dag_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\"). The timer begins when the first job is submitted. 
If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "encryption_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The encryption configuration for the workflow template.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateEncryptionConfigSchema(), + }, + + "parameters": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.", + Elem: DataprocWorkflowTemplateParametersSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "version": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Output only. The current version of this workflow template.", + Deprecated: "version is not useful as a configurable field, and will be removed in the future.", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was created.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. 
Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was last updated.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "step_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", + }, + + "hadoop_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Hadoop job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobSchema(), + }, + + "hive_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Job is a Hive job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "pig_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Pig job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobSchema(), + }, + + "prerequisite_step_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "presto_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Presto job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobSchema(), + }, + + "pyspark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a PySpark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobSchema(), + }, + + "scheduling": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job scheduling configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSchedulingSchema(), + }, + + "spark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Job is a Spark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobSchema(), + }, + + "spark_r_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a SparkR job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobSchema(), + }, + + "spark_sql_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a SparkSql job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains Hive queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. 
Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Pig. 
Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains the Pig queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Presto client tags to attach to this query", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema(), + }, + + "output_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values. 
Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobQueryListSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_python_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "python_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchedulingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_failures_per_hour": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.", + }, + + "max_failures_total": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. 
Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. 
The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file that contains the main class.", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_r_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. 
Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
HCFS URIs of jar files to be added to the Spark CLASSPATH.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_selector": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementClusterSelectorSchema(), + }, + + "managed_cluster": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A cluster that is managed by the workflow.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementClusterSelectorSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_labels": { + Type: schema.TypeMap, + Required: true, + ForceNew: true, + Description: "Required. The cluster labels. 
Cluster must have all labels to match.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", + }, + + "config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The cluster configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSchema(), + }, + + "encryption_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Encryption settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSchema(), + }, + + "endpoint_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Port/endpoint configuration for this cluster", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSchema(), + }, + + "gce_cluster_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The shared Compute Engine config settings for all instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchema(), + }, + +{{- if ne $.TargetVersionName "ga" }} + "gke_cluster_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSchema(), + }, + +{{- end }} + "initialization_actions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSchema(), + }, + + "lifecycle_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Lifecycle setting for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSchema(), + }, + + "master_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigSchema(), +{{- if ne $.TargetVersionName "ga" }} + }, + + "metastore_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Metastore configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSchema(), +{{- end }} + }, + + "secondary_worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for additional worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSchema(), + }, + + "security_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Security settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSchema(), + }, + + "software_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The config settings for software inside the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSchema(), + }, + + "staging_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "temp_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gce_pd_kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_http_port_access": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. 
Defaults to false.", + }, + + "http_ports": { + Type: schema.TypeMap, + Computed: true, + Description: "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "internal_ip_only": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", + }, + + "node_group_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Node Group Affinity for sole-tenant clusters.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySchema(), + }, + + "private_ipv6_google_access": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", + }, + + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Reservation Affinity for consuming Zonal reservation.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySchema(), + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + }, + + "service_account_scopes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSchema(), + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. 
If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", + }, + + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Corresponds to the label key of reservation resource.", + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Corresponds to the label values of reservation resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether instances have integrity monitoring enabled. Integrity monitoring compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not.", + }, + + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether the instances have Secure Boot enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", + }, + + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether the instance have the vTPM enabled. Virtual Trusted Platform Module protects objects like keys, certificates and enables Measured Boot by performing the measurements needed to create a known good boot baseline, called the integrity policy baseline.", + }, + }, + } +} + +{{- if ne $.TargetVersionName "ga" }} +func DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespaced_gke_deployment_target": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
A target for the deployment.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. A namespace within the GKE cluster to deploy into.", + }, + + "target_gke_cluster": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + }, + }, + } +} + +{{- end }} +func DataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "executable_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Required. Cloud Storage URI of executable file.", + }, + + "execution_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete_time": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. 
The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "auto_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_start_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSchema(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. 
Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSchema(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. 
See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. 
This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataproc_metastore_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. Resource name of an existing Dataproc Metastore service. 
Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", +{{- end }} + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSchema(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSchema(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. 
Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. 
Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kerberos_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Kerberos related configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cross_realm_trust_admin_server": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_kdc": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", + }, + + "cross_realm_trust_shared_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", + }, + + "enable_kerberos": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + }, + + "kdc_db_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + }, + + "key_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. 
The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "keystore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + }, + + "keystore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", + }, + + "realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + }, + + "root_principal_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + }, + + "tgt_lifetime_hours": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", + }, + + "truststore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. 
If not provided, Dataproc will provide a self-signed certificate.", + }, + + "truststore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + }, + + "optional_components": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The set of components to activate on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. 
The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSchema(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSchema(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. 
The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplateEncryptionConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Cloud KMS key name to use for encryption.", + }, + }, + } +} + +func DataprocWorkflowTemplateParametersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. 
Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Brief description of the parameter. 
Must not exceed 1024 characters.", + }, + + "validation": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Validation rules to be applied to this parameter's value.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regex": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on regular expressions.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationRegexSchema(), + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on a list of allowed values.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationValuesSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationRegexSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regexes": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationValuesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
List of allowed values for the parameter.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EncryptionConfig: expandDataprocWorkflowTemplateEncryptionConfig(d.Get("encryption_config")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkflowTemplate(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating WorkflowTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkflowTemplate %q: %#v", d.Id(), res) + + return resourceDataprocWorkflowTemplateRead(d, meta) +} + +func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EncryptionConfig: expandDataprocWorkflowTemplateEncryptionConfig(d.Get("encryption_config")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkflowTemplate(context.Background(), obj) + if 
err != nil { + resourceName := fmt.Sprintf("DataprocWorkflowTemplate %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("jobs", flattenDataprocWorkflowTemplateJobsArray(res.Jobs)); err != nil { + return fmt.Errorf("error setting jobs in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("placement", flattenDataprocWorkflowTemplatePlacement(res.Placement)); err != nil { + return fmt.Errorf("error setting placement in state: %s", err) + } + if err = d.Set("dag_timeout", res.DagTimeout); err != nil { + return fmt.Errorf("error setting dag_timeout in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("encryption_config", flattenDataprocWorkflowTemplateEncryptionConfig(res.EncryptionConfig)); err != nil { + return fmt.Errorf("error setting encryption_config in state: %s", err) + } + if err = d.Set("parameters", flattenDataprocWorkflowTemplateParametersArray(res.Parameters)); err != nil { + return fmt.Errorf("error setting parameters in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", flattenDataprocWorkflowTemplateLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("terraform_labels", flattenDataprocWorkflowTemplateTerraformLabels(res.Labels, d)); 
err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataprocWorkflowTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + + return resourceDataprocWorkflowTemplateRead(d, meta) +} + +func resourceDataprocWorkflowTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EncryptionConfig: expandDataprocWorkflowTemplateEncryptionConfig(d.Get("encryption_config")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + log.Printf("[DEBUG] Deleting WorkflowTemplate %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } 
else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkflowTemplate(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting WorkflowTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished deleting WorkflowTemplate %q", d.Id()) + return nil +} + +func resourceDataprocWorkflowTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/workflowTemplates/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataprocWorkflowTemplateJobsArray(o interface{}) []WorkflowTemplateJobs { + if o == nil { + return make([]WorkflowTemplateJobs, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]WorkflowTemplateJobs, 0) + } + + items := make([]WorkflowTemplateJobs, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateJobs(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateJobs(o interface{}) *WorkflowTemplateJobs { + if o == nil { + return EmptyWorkflowTemplateJobs + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplateJobs{ + StepId: dcl.String(obj["step_id"].(string)), + HadoopJob: expandDataprocWorkflowTemplateJobsHadoopJob(obj["hadoop_job"]), + HiveJob: expandDataprocWorkflowTemplateJobsHiveJob(obj["hive_job"]), + Labels: tpgresource.CheckStringMap(obj["labels"]), + PigJob:
expandDataprocWorkflowTemplateJobsPigJob(obj["pig_job"]), + PrerequisiteStepIds: dcl.ExpandStringArray(obj["prerequisite_step_ids"]), + PrestoJob: expandDataprocWorkflowTemplateJobsPrestoJob(obj["presto_job"]), + PysparkJob: expandDataprocWorkflowTemplateJobsPysparkJob(obj["pyspark_job"]), + Scheduling: expandDataprocWorkflowTemplateJobsScheduling(obj["scheduling"]), + SparkJob: expandDataprocWorkflowTemplateJobsSparkJob(obj["spark_job"]), + SparkRJob: expandDataprocWorkflowTemplateJobsSparkRJob(obj["spark_r_job"]), + SparkSqlJob: expandDataprocWorkflowTemplateJobsSparkSqlJob(obj["spark_sql_job"]), + } +} + +func flattenDataprocWorkflowTemplateJobsArray(objs []WorkflowTemplateJobs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateJobs(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateJobs(obj *WorkflowTemplateJobs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "step_id": obj.StepId, + "hadoop_job": flattenDataprocWorkflowTemplateJobsHadoopJob(obj.HadoopJob), + "hive_job": flattenDataprocWorkflowTemplateJobsHiveJob(obj.HiveJob), + "labels": obj.Labels, + "pig_job": flattenDataprocWorkflowTemplateJobsPigJob(obj.PigJob), + "prerequisite_step_ids": obj.PrerequisiteStepIds, + "presto_job": flattenDataprocWorkflowTemplateJobsPrestoJob(obj.PrestoJob), + "pyspark_job": flattenDataprocWorkflowTemplateJobsPysparkJob(obj.PysparkJob), + "scheduling": flattenDataprocWorkflowTemplateJobsScheduling(obj.Scheduling), + "spark_job": flattenDataprocWorkflowTemplateJobsSparkJob(obj.SparkJob), + "spark_r_job": flattenDataprocWorkflowTemplateJobsSparkRJob(obj.SparkRJob), + "spark_sql_job": flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj.SparkSqlJob), + } + + return transformed + +} + +func expandDataprocWorkflowTemplateJobsHadoopJob(o interface{}) *WorkflowTemplateJobsHadoopJob { 
+ if o == nil { + return EmptyWorkflowTemplateJobsHadoopJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHadoopJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHadoopJob{ + ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj["logging_config"]), + MainClass: dcl.String(obj["main_class"].(string)), + MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), + Properties: tpgresource.CheckStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHadoopJob(obj *WorkflowTemplateJobsHadoopJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj.LoggingConfig), + "main_class": obj.MainClass, + "main_jar_file_uri": obj.MainJarFileUri, + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(o interface{}) *WorkflowTemplateJobsHadoopJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHadoopJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj *WorkflowTemplateJobsHadoopJobLoggingConfig) interface{} { + if obj == nil || 
obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHiveJob(o interface{}) *WorkflowTemplateJobsHiveJob { + if o == nil { + return EmptyWorkflowTemplateJobsHiveJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHiveJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHiveJob{ + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsHiveJobQueryList(obj["query_list"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHiveJob(obj *WorkflowTemplateJobsHiveJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "continue_on_failure": obj.ContinueOnFailure, + "jar_file_uris": obj.JarFileUris, + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHiveJobQueryList(o interface{}) *WorkflowTemplateJobsHiveJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsHiveJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHiveJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHiveJobQueryList{ + Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj 
*WorkflowTemplateJobsHiveJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJob(o interface{}) *WorkflowTemplateJobsPigJob { + if o == nil { + return EmptyWorkflowTemplateJobsPigJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPigJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPigJob{ + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsPigJobQueryList(obj["query_list"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJob(obj *WorkflowTemplateJobsPigJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "continue_on_failure": obj.ContinueOnFailure, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(o interface{}) *WorkflowTemplateJobsPigJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsPigJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return 
EmptyWorkflowTemplateJobsPigJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPigJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj *WorkflowTemplateJobsPigJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJobQueryList(o interface{}) *WorkflowTemplateJobsPigJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsPigJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPigJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPigJobQueryList{ + Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj *WorkflowTemplateJobsPigJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJob(o interface{}) *WorkflowTemplateJobsPrestoJob { + if o == nil { + return EmptyWorkflowTemplateJobsPrestoJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPrestoJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPrestoJob{ + ClientTags: dcl.ExpandStringArray(obj["client_tags"]), + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + LoggingConfig: expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj["logging_config"]), + OutputFormat: dcl.String(obj["output_format"].(string)), + Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: 
dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsPrestoJobQueryList(obj["query_list"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJob(obj *WorkflowTemplateJobsPrestoJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "client_tags": obj.ClientTags, + "continue_on_failure": obj.ContinueOnFailure, + "logging_config": flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj.LoggingConfig), + "output_format": obj.OutputFormat, + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj.QueryList), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(o interface{}) *WorkflowTemplateJobsPrestoJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPrestoJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj *WorkflowTemplateJobsPrestoJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJobQueryList(o interface{}) *WorkflowTemplateJobsPrestoJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsPrestoJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPrestoJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPrestoJobQueryList{ + 
Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj *WorkflowTemplateJobsPrestoJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPysparkJob(o interface{}) *WorkflowTemplateJobsPysparkJob { + if o == nil { + return EmptyWorkflowTemplateJobsPysparkJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPysparkJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPysparkJob{ + MainPythonFileUri: dcl.String(obj["main_python_file_uri"].(string)), + ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + PythonFileUris: dcl.ExpandStringArray(obj["python_file_uris"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPysparkJob(obj *WorkflowTemplateJobsPysparkJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "main_python_file_uri": obj.MainPythonFileUri, + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "python_file_uris": obj.PythonFileUris, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(o interface{}) *WorkflowTemplateJobsPysparkJobLoggingConfig { + if o == nil { + return 
EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPysparkJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj *WorkflowTemplateJobsPysparkJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsScheduling(o interface{}) *WorkflowTemplateJobsScheduling { + if o == nil { + return EmptyWorkflowTemplateJobsScheduling + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsScheduling + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsScheduling{ + MaxFailuresPerHour: dcl.Int64(int64(obj["max_failures_per_hour"].(int))), + MaxFailuresTotal: dcl.Int64(int64(obj["max_failures_total"].(int))), + } +} + +func flattenDataprocWorkflowTemplateJobsScheduling(obj *WorkflowTemplateJobsScheduling) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_failures_per_hour": obj.MaxFailuresPerHour, + "max_failures_total": obj.MaxFailuresTotal, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkJob(o interface{}) *WorkflowTemplateJobsSparkJob { + if o == nil { + return EmptyWorkflowTemplateJobsSparkJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkJob{ + ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: 
dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj["logging_config"]), + MainClass: dcl.String(obj["main_class"].(string)), + MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), + Properties: tpgresource.CheckStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkJob(obj *WorkflowTemplateJobsSparkJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj.LoggingConfig), + "main_class": obj.MainClass, + "main_jar_file_uri": obj.MainJarFileUri, + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(o interface{}) *WorkflowTemplateJobsSparkJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj *WorkflowTemplateJobsSparkJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkRJob(o interface{}) *WorkflowTemplateJobsSparkRJob { + if o == nil { + return EmptyWorkflowTemplateJobsSparkRJob + } + objArr := 
o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkRJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkRJob{ + MainRFileUri: dcl.String(obj["main_r_file_uri"].(string)), + ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkRJob(obj *WorkflowTemplateJobsSparkRJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "main_r_file_uri": obj.MainRFileUri, + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(o interface{}) *WorkflowTemplateJobsSparkRJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkRJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj *WorkflowTemplateJobsSparkRJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJob(o interface{}) 
*WorkflowTemplateJobsSparkSqlJob { + if o == nil { + return EmptyWorkflowTemplateJobsSparkSqlJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkSqlJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkSqlJob{ + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj["query_list"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj *WorkflowTemplateJobsSparkSqlJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(o interface{}) *WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkSqlJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj *WorkflowTemplateJobsSparkSqlJobLoggingConfig) interface{} 
{ + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(o interface{}) *WorkflowTemplateJobsSparkSqlJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkSqlJobQueryList{ + Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj *WorkflowTemplateJobsSparkSqlJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacement(o interface{}) *WorkflowTemplatePlacement { + if o == nil { + return EmptyWorkflowTemplatePlacement + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacement + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacement{ + ClusterSelector: expandDataprocWorkflowTemplatePlacementClusterSelector(obj["cluster_selector"]), + ManagedCluster: expandDataprocWorkflowTemplatePlacementManagedCluster(obj["managed_cluster"]), + } +} + +func flattenDataprocWorkflowTemplatePlacement(obj *WorkflowTemplatePlacement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_selector": flattenDataprocWorkflowTemplatePlacementClusterSelector(obj.ClusterSelector), + "managed_cluster": flattenDataprocWorkflowTemplatePlacementManagedCluster(obj.ManagedCluster), + } + + return []interface{}{transformed} + +} + +func 
expandDataprocWorkflowTemplatePlacementClusterSelector(o interface{}) *WorkflowTemplatePlacementClusterSelector { + if o == nil { + return EmptyWorkflowTemplatePlacementClusterSelector + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementClusterSelector + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementClusterSelector{ + ClusterLabels: tpgresource.CheckStringMap(obj["cluster_labels"]), + Zone: dcl.StringOrNil(obj["zone"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementClusterSelector(obj *WorkflowTemplatePlacementClusterSelector) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_labels": obj.ClusterLabels, + "zone": obj.Zone, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedCluster(o interface{}) *WorkflowTemplatePlacementManagedCluster { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedCluster + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedCluster + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedCluster{ + ClusterName: dcl.String(obj["cluster_name"].(string)), + Config: expandDataprocWorkflowTemplatePlacementManagedClusterConfig(obj["config"]), + Labels: tpgresource.CheckStringMap(obj["labels"]), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedCluster(obj *WorkflowTemplatePlacementManagedCluster) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_name": obj.ClusterName, + "config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfig(obj.Config), + "labels": obj.Labels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfig(o interface{}) 
*WorkflowTemplatePlacementManagedClusterConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfig{ + AutoscalingConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(obj["autoscaling_config"]), + EncryptionConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(obj["encryption_config"]), + EndpointConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(obj["endpoint_config"]), + GceClusterConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(obj["gce_cluster_config"]), +{{- if ne $.TargetVersionName "ga" }} + GkeClusterConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(obj["gke_cluster_config"]), +{{- end }} + InitializationActions: expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(obj["initialization_actions"]), + LifecycleConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(obj["lifecycle_config"]), + MasterConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(obj["master_config"]), +{{- if ne $.TargetVersionName "ga" }} + MetastoreConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(obj["metastore_config"]), +{{- end }} + SecondaryWorkerConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(obj["secondary_worker_config"]), + SecurityConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(obj["security_config"]), + SoftwareConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(obj["software_config"]), + StagingBucket: 
dcl.String(obj["staging_bucket"].(string)), + TempBucket: dcl.String(obj["temp_bucket"].(string)), + WorkerConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(obj["worker_config"]), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfig(obj *WorkflowTemplatePlacementManagedClusterConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "autoscaling_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(obj.AutoscalingConfig), + "encryption_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(obj.EncryptionConfig), + "endpoint_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(obj.EndpointConfig), + "gce_cluster_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(obj.GceClusterConfig), +{{- if ne $.TargetVersionName "ga" }} + "gke_cluster_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(obj.GkeClusterConfig), +{{- end }} + "initialization_actions": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(obj.InitializationActions), + "lifecycle_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(obj.LifecycleConfig), + "master_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(obj.MasterConfig), +{{- if ne $.TargetVersionName "ga" }} + "metastore_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(obj.MetastoreConfig), +{{- end }} + "secondary_worker_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(obj.SecondaryWorkerConfig), + "security_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(obj.SecurityConfig), + "software_config": 
flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(obj.SoftwareConfig), + "staging_bucket": obj.StagingBucket, + "temp_bucket": obj.TempBucket, + "worker_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(obj.WorkerConfig), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{ + Policy: dcl.String(obj["policy"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(obj *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "policy": obj.Policy, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{ + GcePdKmsKeyName: dcl.String(obj["gce_pd_kms_key_name"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(obj 
*WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gce_pd_kms_key_name": obj.GcePdKmsKeyName, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{ + EnableHttpPortAccess: dcl.Bool(obj["enable_http_port_access"].(bool)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(obj *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_http_port_access": obj.EnableHttpPortAccess, + "http_ports": obj.HttpPorts, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{ + InternalIPOnly: dcl.Bool(obj["internal_ip_only"].(bool)), + Metadata: tpgresource.CheckStringMap(obj["metadata"]), + Network: dcl.String(obj["network"].(string)), + NodeGroupAffinity: 
expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(obj["node_group_affinity"]), + PrivateIPv6GoogleAccess: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(obj["private_ipv6_google_access"].(string)), + ReservationAffinity: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(obj["reservation_affinity"]), + ServiceAccount: dcl.String(obj["service_account"].(string)), + ServiceAccountScopes: dcl.ExpandStringArray(obj["service_account_scopes"]), + ShieldedInstanceConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(obj["shielded_instance_config"]), + Subnetwork: dcl.String(obj["subnetwork"].(string)), + Tags: dcl.ExpandStringArray(obj["tags"]), + Zone: dcl.StringOrNil(obj["zone"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "internal_ip_only": obj.InternalIPOnly, + "metadata": obj.Metadata, + "network": obj.Network, + "node_group_affinity": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(obj.NodeGroupAffinity), + "private_ipv6_google_access": obj.PrivateIPv6GoogleAccess, + "reservation_affinity": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(obj.ReservationAffinity), + "service_account": obj.ServiceAccount, + "service_account_scopes": obj.ServiceAccountScopes, + "shielded_instance_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(obj.ShieldedInstanceConfig), + "subnetwork": obj.Subnetwork, + "tags": obj.Tags, + "zone": obj.Zone, + } + + return []interface{}{transformed} + +} + +func 
expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{ + NodeGroup: dcl.String(obj["node_group"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "node_group": obj.NodeGroup, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{ + ConsumeReservationType: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(obj["consume_reservation_type"].(string)), + Key: dcl.String(obj["key"].(string)), + Values: dcl.ExpandStringArray(obj["values"]), + } +} + +func 
flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "consume_reservation_type": obj.ConsumeReservationType, + "key": obj.Key, + "values": obj.Values, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{ + EnableIntegrityMonitoring: dcl.Bool(obj["enable_integrity_monitoring"].(bool)), + EnableSecureBoot: dcl.Bool(obj["enable_secure_boot"].(bool)), + EnableVtpm: dcl.Bool(obj["enable_vtpm"].(bool)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_integrity_monitoring": obj.EnableIntegrityMonitoring, + "enable_secure_boot": obj.EnableSecureBoot, + "enable_vtpm": obj.EnableVtpm, + } + + return []interface{}{transformed} + +} +{{- if ne $.TargetVersionName "ga" }} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + if o == nil { + return 
EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{ + NamespacedGkeDeploymentTarget: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(obj["namespaced_gke_deployment_target"]), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(obj *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "namespaced_gke_deployment_target": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(obj.NamespacedGkeDeploymentTarget), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{ + ClusterNamespace: dcl.String(obj["cluster_namespace"].(string)), + TargetGkeCluster: dcl.String(obj["target_gke_cluster"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(obj 
*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_namespace": obj.ClusterNamespace, + "target_gke_cluster": obj.TargetGkeCluster, + } + + return []interface{}{transformed} + +} +{{- end }} +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if o == nil { + return make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0) + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{ + ExecutableFile: dcl.String(obj["executable_file"].(string)), + ExecutionTimeout: dcl.String(obj["execution_timeout"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(objs []WorkflowTemplatePlacementManagedClusterConfigInitializationActions) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(&item) + items = 
append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(obj *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "executable_file": obj.ExecutableFile, + "execution_timeout": obj.ExecutionTimeout, + } + + return transformed + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{ + AutoDeleteTime: dcl.String(obj["auto_delete_time"].(string)), + AutoDeleteTtl: dcl.String(obj["auto_delete_ttl"].(string)), + IdleDeleteTtl: dcl.String(obj["idle_delete_ttl"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(obj *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "auto_delete_time": obj.AutoDeleteTime, + "auto_delete_ttl": obj.AutoDeleteTtl, + "idle_delete_ttl": obj.IdleDeleteTtl, + "idle_start_time": obj.IdleStartTime, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return 
&WorkflowTemplatePlacementManagedClusterConfigMasterConfig{ + Accelerators: expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(obj["accelerators"]), + DiskConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(obj["disk_config"]), + Image: dcl.String(obj["image"].(string)), + MachineType: dcl.String(obj["machine_type"].(string)), + MinCpuPlatform: dcl.StringOrNil(obj["min_cpu_platform"].(string)), + NumInstances: dcl.Int64(int64(obj["num_instances"].(int))), + Preemptibility: WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef(obj["preemptibility"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerators": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(obj.Accelerators), + "disk_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(obj.DiskConfig), + "image": obj.Image, + "machine_type": obj.MachineType, + "min_cpu_platform": obj.MinCpuPlatform, + "num_instances": obj.NumInstances, + "preemptibility": obj.Preemptibility, + "instance_names": obj.InstanceNames, + "is_preemptible": obj.IsPreemptible, + "managed_group_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(obj.ManagedGroupConfig), + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if o == nil { + return nil + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := 
make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{ + AcceleratorCount: dcl.Int64(int64(obj["accelerator_count"].(int))), + AcceleratorType: dcl.String(obj["accelerator_type"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(objs []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerator_count": obj.AcceleratorCount, + "accelerator_type": obj.AcceleratorType, + } + + return transformed + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{ 
+ BootDiskSizeGb: dcl.Int64(int64(obj["boot_disk_size_gb"].(int))), + BootDiskType: dcl.String(obj["boot_disk_type"].(string)), + NumLocalSsds: dcl.Int64OrNil(int64(obj["num_local_ssds"].(int))), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "boot_disk_size_gb": obj.BootDiskSizeGb, + "boot_disk_type": obj.BootDiskType, + "num_local_ssds": obj.NumLocalSsds, + } + + return []interface{}{transformed} + +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instance_group_manager_name": obj.InstanceGroupManagerName, + "instance_template_name": obj.InstanceTemplateName, +{{- if ne $.TargetVersionName "ga" }} + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{ + DataprocMetastoreService: dcl.String(obj["dataproc_metastore_service"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "dataproc_metastore_service": obj.DataprocMetastoreService, +{{- end }} + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{ + Accelerators: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(obj["accelerators"]), + DiskConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(obj["disk_config"]), + Image: dcl.String(obj["image"].(string)), + MachineType: dcl.String(obj["machine_type"].(string)), + MinCpuPlatform: dcl.StringOrNil(obj["min_cpu_platform"].(string)), + NumInstances: dcl.Int64(int64(obj["num_instances"].(int))), + Preemptibility: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(obj["preemptibility"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerators": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(obj.Accelerators), + "disk_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(obj.DiskConfig), + "image": obj.Image, + "machine_type": obj.MachineType, + "min_cpu_platform": obj.MinCpuPlatform, + "num_instances": obj.NumInstances, + "preemptibility": obj.Preemptibility, + "instance_names": obj.InstanceNames, + "is_preemptible": obj.IsPreemptible, + 
"managed_group_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(obj.ManagedGroupConfig), + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if o == nil { + return nil + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{ + AcceleratorCount: dcl.Int64(int64(obj["accelerator_count"].(int))), + AcceleratorType: dcl.String(obj["accelerator_type"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(objs []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) interface{} { + if obj == 
nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerator_count": obj.AcceleratorCount, + "accelerator_type": obj.AcceleratorType, + } + + return transformed + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{ + BootDiskSizeGb: dcl.Int64(int64(obj["boot_disk_size_gb"].(int))), + BootDiskType: dcl.String(obj["boot_disk_type"].(string)), + NumLocalSsds: dcl.Int64OrNil(int64(obj["num_local_ssds"].(int))), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "boot_disk_size_gb": obj.BootDiskSizeGb, + "boot_disk_type": obj.BootDiskType, + "num_local_ssds": obj.NumLocalSsds, + } + + return []interface{}{transformed} + +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instance_group_manager_name": obj.InstanceGroupManagerName, + "instance_template_name": obj.InstanceTemplateName, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if o == nil { + return 
EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{ + KerberosConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(obj["kerberos_config"]), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kerberos_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(obj.KerberosConfig), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{ + CrossRealmTrustAdminServer: dcl.String(obj["cross_realm_trust_admin_server"].(string)), + CrossRealmTrustKdc: dcl.String(obj["cross_realm_trust_kdc"].(string)), + CrossRealmTrustRealm: dcl.String(obj["cross_realm_trust_realm"].(string)), + CrossRealmTrustSharedPassword: dcl.String(obj["cross_realm_trust_shared_password"].(string)), + EnableKerberos: dcl.Bool(obj["enable_kerberos"].(bool)), + KdcDbKey: dcl.String(obj["kdc_db_key"].(string)), + KeyPassword: 
dcl.String(obj["key_password"].(string)), + Keystore: dcl.String(obj["keystore"].(string)), + KeystorePassword: dcl.String(obj["keystore_password"].(string)), + KmsKey: dcl.String(obj["kms_key"].(string)), + Realm: dcl.String(obj["realm"].(string)), + RootPrincipalPassword: dcl.String(obj["root_principal_password"].(string)), + TgtLifetimeHours: dcl.Int64(int64(obj["tgt_lifetime_hours"].(int))), + Truststore: dcl.String(obj["truststore"].(string)), + TruststorePassword: dcl.String(obj["truststore_password"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cross_realm_trust_admin_server": obj.CrossRealmTrustAdminServer, + "cross_realm_trust_kdc": obj.CrossRealmTrustKdc, + "cross_realm_trust_realm": obj.CrossRealmTrustRealm, + "cross_realm_trust_shared_password": obj.CrossRealmTrustSharedPassword, + "enable_kerberos": obj.EnableKerberos, + "kdc_db_key": obj.KdcDbKey, + "key_password": obj.KeyPassword, + "keystore": obj.Keystore, + "keystore_password": obj.KeystorePassword, + "kms_key": obj.KmsKey, + "realm": obj.Realm, + "root_principal_password": obj.RootPrincipalPassword, + "tgt_lifetime_hours": obj.TgtLifetimeHours, + "truststore": obj.Truststore, + "truststore_password": obj.TruststorePassword, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if o == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + } + obj := objArr[0].(map[string]interface{}) + return 
&WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{ + ImageVersion: dcl.String(obj["image_version"].(string)), + OptionalComponents: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(obj["optional_components"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "image_version": obj.ImageVersion, + "optional_components": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(obj.OptionalComponents), + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{ + Accelerators: expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(obj["accelerators"]), + DiskConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(obj["disk_config"]), + Image: dcl.String(obj["image"].(string)), + MachineType: dcl.String(obj["machine_type"].(string)), + MinCpuPlatform: dcl.StringOrNil(obj["min_cpu_platform"].(string)), + NumInstances: dcl.Int64(int64(obj["num_instances"].(int))), + Preemptibility: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef(obj["preemptibility"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(obj 
*WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerators": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(obj.Accelerators), + "disk_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(obj.DiskConfig), + "image": obj.Image, + "machine_type": obj.MachineType, + "min_cpu_platform": obj.MinCpuPlatform, + "num_instances": obj.NumInstances, + "preemptibility": obj.Preemptibility, + "instance_names": obj.InstanceNames, + "is_preemptible": obj.IsPreemptible, + "managed_group_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(obj.ManagedGroupConfig), + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if o == nil { + return nil + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{ + AcceleratorCount: dcl.Int64(int64(obj["accelerator_count"].(int))), + AcceleratorType: dcl.String(obj["accelerator_type"].(string)), + } +} + +func 
flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(objs []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(obj *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerator_count": obj.AcceleratorCount, + "accelerator_type": obj.AcceleratorType, + } + + return transformed + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{ + BootDiskSizeGb: dcl.Int64(int64(obj["boot_disk_size_gb"].(int))), + BootDiskType: dcl.String(obj["boot_disk_type"].(string)), + NumLocalSsds: dcl.Int64OrNil(int64(obj["num_local_ssds"].(int))), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(obj *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "boot_disk_size_gb": obj.BootDiskSizeGb, + "boot_disk_type": obj.BootDiskType, + "num_local_ssds": obj.NumLocalSsds, + } + + return []interface{}{transformed} + +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(obj 
*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instance_group_manager_name": obj.InstanceGroupManagerName, + "instance_template_name": obj.InstanceTemplateName, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateEncryptionConfig(o interface{}) *WorkflowTemplateEncryptionConfig { + if o == nil { + return EmptyWorkflowTemplateEncryptionConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateEncryptionConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateEncryptionConfig{ + KmsKey: dcl.String(obj["kms_key"].(string)), + } +} + +func flattenDataprocWorkflowTemplateEncryptionConfig(obj *WorkflowTemplateEncryptionConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key": obj.KmsKey, + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplateParametersArray(o interface{}) []WorkflowTemplateParameters { + if o == nil { + return make([]WorkflowTemplateParameters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]WorkflowTemplateParameters, 0) + } + + items := make([]WorkflowTemplateParameters, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateParameters(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateParameters(o interface{}) *WorkflowTemplateParameters { + if o == nil { + return EmptyWorkflowTemplateParameters + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplateParameters{ + Fields: dcl.ExpandStringArray(obj["fields"]), + Name: dcl.String(obj["name"].(string)), + Description: dcl.String(obj["description"].(string)), + Validation: 
expandDataprocWorkflowTemplateParametersValidation(obj["validation"]), + } +} + +func flattenDataprocWorkflowTemplateParametersArray(objs []WorkflowTemplateParameters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateParameters(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateParameters(obj *WorkflowTemplateParameters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "fields": obj.Fields, + "name": obj.Name, + "description": obj.Description, + "validation": flattenDataprocWorkflowTemplateParametersValidation(obj.Validation), + } + + return transformed + +} + +func expandDataprocWorkflowTemplateParametersValidation(o interface{}) *WorkflowTemplateParametersValidation { + if o == nil { + return EmptyWorkflowTemplateParametersValidation + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateParametersValidation + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateParametersValidation{ + Regex: expandDataprocWorkflowTemplateParametersValidationRegex(obj["regex"]), + Values: expandDataprocWorkflowTemplateParametersValidationValues(obj["values"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidation(obj *WorkflowTemplateParametersValidation) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "regex": flattenDataprocWorkflowTemplateParametersValidationRegex(obj.Regex), + "values": flattenDataprocWorkflowTemplateParametersValidationValues(obj.Values), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateParametersValidationRegex(o interface{}) *WorkflowTemplateParametersValidationRegex { + if o == nil { + return EmptyWorkflowTemplateParametersValidationRegex + } + objArr := o.([]interface{}) + if 
len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateParametersValidationRegex + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateParametersValidationRegex{ + Regexes: dcl.ExpandStringArray(obj["regexes"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidationRegex(obj *WorkflowTemplateParametersValidationRegex) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "regexes": obj.Regexes, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateParametersValidationValues(o interface{}) *WorkflowTemplateParametersValidationValues { + if o == nil { + return EmptyWorkflowTemplateParametersValidationValues + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateParametersValidationValues + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateParametersValidationValues{ + Values: dcl.ExpandStringArray(obj["values"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidationValues(obj *WorkflowTemplateParametersValidationValues) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "values": obj.Values, + } + + return []interface{}{transformed} + +} + +func flattenDataprocWorkflowTemplateLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataprocWorkflowTemplateTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + 
+func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(obj []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { + objs := o.([]interface{}) + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(objs)) + for _, item := range objs { + i := WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl index 671f2e291c66..35bb465f38b0 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_dataproc_workflow_template' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataproc.googleapis.com' api_version: 'v1' api_resource_type_kind: 'WorkflowTemplate' diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_sweeper.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_sweeper.go new file mode 100644 index 000000000000..ee81f29ac0a9 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_sweeper.go @@ -0,0 +1,53 @@ +package dataproc + +import ( + "context" + "log" + 
"testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("DataprocWorkflowTemplate", testSweepDataprocWorkflowTemplate) +} + +func testSweepDataprocWorkflowTemplate(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for DataprocWorkflowTemplate") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLDataprocClient(config, config.UserAgent, "", 0) + err = client.DeleteAllWorkflowTemplate(context.Background(), d["project"], d["location"], isDeletableDataprocWorkflowTemplate) + if err != nil { + return err + } + return nil +} + +func isDeletableDataprocWorkflowTemplate(r *WorkflowTemplate) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl b/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl new file mode 100644 index 000000000000..078b43778b7e --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl @@ -0,0 +1,3640 @@ +package dataproc + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type WorkflowTemplate struct { + Name *string `json:"name"` + Version *int64 
`json:"version"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + EncryptionConfig *WorkflowTemplateEncryptionConfig `json:"encryptionConfig"` + Placement *WorkflowTemplatePlacement `json:"placement"` + Jobs []WorkflowTemplateJobs `json:"jobs"` + Parameters []WorkflowTemplateParameters `json:"parameters"` + DagTimeout *string `json:"dagTimeout"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *WorkflowTemplate) String() string { + return dcl.SprintResource(r) +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum. +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum string + +// WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + v := WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", "INHERIT_FROM_SUBNETWORK", "OUTBOUND", "BIDIRECTIONAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum. +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum string + +// WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + v := WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TYPE_UNSPECIFIED", "NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum. 
+type WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum string + +// WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum { + v := WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum. +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum string + +// WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum { + v := WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum. +type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum string + +// WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + v := WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum. 
+type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum string + +// WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { + v := WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DOCKER", "DRUID", "FLINK", "HBASE", "HIVE_WEBHCAT", "JUPYTER", "KERBEROS", "PRESTO", "RANGER", "SOLR", "ZEPPELIN", "ZOOKEEPER"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum", + Value: string(v), + Valid: []string{}, + } +} + +type WorkflowTemplateEncryptionConfig struct { + empty bool `json:"-"` + KmsKey *string `json:"kmsKey"` +} + +type jsonWorkflowTemplateEncryptionConfig WorkflowTemplateEncryptionConfig + +func (r *WorkflowTemplateEncryptionConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateEncryptionConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateEncryptionConfig + } else { + + r.KmsKey = res.KmsKey + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateEncryptionConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplateEncryptionConfig *WorkflowTemplateEncryptionConfig = &WorkflowTemplateEncryptionConfig{empty: true} + +func (r *WorkflowTemplateEncryptionConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateEncryptionConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateEncryptionConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacement struct { + empty bool `json:"-"` + ManagedCluster *WorkflowTemplatePlacementManagedCluster `json:"managedCluster"` + ClusterSelector *WorkflowTemplatePlacementClusterSelector `json:"clusterSelector"` +} + +type jsonWorkflowTemplatePlacement WorkflowTemplatePlacement + +func (r *WorkflowTemplatePlacement) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacement + } else { + + r.ManagedCluster = res.ManagedCluster + + r.ClusterSelector = res.ClusterSelector + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacement *WorkflowTemplatePlacement = &WorkflowTemplatePlacement{empty: true} + +func (r *WorkflowTemplatePlacement) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacement) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedCluster struct { + empty bool `json:"-"` + ClusterName *string `json:"clusterName"` + Config *WorkflowTemplatePlacementManagedClusterConfig `json:"config"` + Labels map[string]string `json:"labels"` +} + +type jsonWorkflowTemplatePlacementManagedCluster WorkflowTemplatePlacementManagedCluster + +func (r *WorkflowTemplatePlacementManagedCluster) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedCluster + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedCluster + } else { + + r.ClusterName = res.ClusterName + + r.Config = res.Config + + r.Labels = res.Labels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedCluster is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedCluster *WorkflowTemplatePlacementManagedCluster = &WorkflowTemplatePlacementManagedCluster{empty: true} + +func (r *WorkflowTemplatePlacementManagedCluster) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedCluster) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedCluster) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfig struct { + empty bool `json:"-"` + StagingBucket *string `json:"stagingBucket"` + TempBucket *string `json:"tempBucket"` + GceClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig `json:"gceClusterConfig"` + MasterConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfig `json:"masterConfig"` + WorkerConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig `json:"workerConfig"` + SecondaryWorkerConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig `json:"secondaryWorkerConfig"` + SoftwareConfig *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig `json:"softwareConfig"` + InitializationActions []WorkflowTemplatePlacementManagedClusterConfigInitializationActions `json:"initializationActions"` + EncryptionConfig *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig `json:"encryptionConfig"` + AutoscalingConfig *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig `json:"autoscalingConfig"` + SecurityConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig `json:"securityConfig"` + LifecycleConfig *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig `json:"lifecycleConfig"` + EndpointConfig *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig `json:"endpointConfig"` +{{- if ne 
$.TargetVersionName "ga" }} + GkeClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig `json:"gkeClusterConfig"` + MetastoreConfig *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig `json:"metastoreConfig"` +{{- end }} +} + +type jsonWorkflowTemplatePlacementManagedClusterConfig WorkflowTemplatePlacementManagedClusterConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfig + } else { + + r.StagingBucket = res.StagingBucket + + r.TempBucket = res.TempBucket + + r.GceClusterConfig = res.GceClusterConfig + + r.MasterConfig = res.MasterConfig + + r.WorkerConfig = res.WorkerConfig + + r.SecondaryWorkerConfig = res.SecondaryWorkerConfig + + r.SoftwareConfig = res.SoftwareConfig + + r.InitializationActions = res.InitializationActions + + r.EncryptionConfig = res.EncryptionConfig + + r.AutoscalingConfig = res.AutoscalingConfig + + r.SecurityConfig = res.SecurityConfig + + r.LifecycleConfig = res.LifecycleConfig + + r.EndpointConfig = res.EndpointConfig +{{- if ne $.TargetVersionName "ga" }} + + r.GkeClusterConfig = res.GkeClusterConfig + + r.MetastoreConfig = res.MetastoreConfig +{{- end }} + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfig *WorkflowTemplatePlacementManagedClusterConfig = &WorkflowTemplatePlacementManagedClusterConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig struct { + empty bool `json:"-"` + Zone *string `json:"zone"` + Network *string `json:"network"` + Subnetwork *string `json:"subnetwork"` + InternalIPOnly *bool `json:"internalIPOnly"` + PrivateIPv6GoogleAccess *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum `json:"privateIPv6GoogleAccess"` + ServiceAccount *string `json:"serviceAccount"` + ServiceAccountScopes []string `json:"serviceAccountScopes"` + Tags []string `json:"tags"` + Metadata map[string]string `json:"metadata"` + ReservationAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity `json:"reservationAffinity"` + NodeGroupAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity `json:"nodeGroupAffinity"` + ShieldedInstanceConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig `json:"shieldedInstanceConfig"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + if err := 
json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + } else { + + r.Zone = res.Zone + + r.Network = res.Network + + r.Subnetwork = res.Subnetwork + + r.InternalIPOnly = res.InternalIPOnly + + r.PrivateIPv6GoogleAccess = res.PrivateIPv6GoogleAccess + + r.ServiceAccount = res.ServiceAccount + + r.ServiceAccountScopes = res.ServiceAccountScopes + + r.Tags = res.Tags + + r.Metadata = res.Metadata + + r.ReservationAffinity = res.ReservationAffinity + + r.NodeGroupAffinity = res.NodeGroupAffinity + + r.ShieldedInstanceConfig = res.ShieldedInstanceConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity struct { + empty bool `json:"-"` + ConsumeReservationType *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum `json:"consumeReservationType"` + Key *string `json:"key"` + Values []string `json:"values"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + } else { + + r.ConsumeReservationType = res.ConsumeReservationType + + r.Key = res.Key + + r.Values = res.Values + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity struct { + empty bool `json:"-"` + NodeGroup *string `json:"nodeGroup"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + } else { + + r.NodeGroup = res.NodeGroup + + } + return nil +} + +// This object is used to assert a desired 
state where this WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig struct { + empty bool `json:"-"` + EnableSecureBoot *bool `json:"enableSecureBoot"` + EnableVtpm *bool `json:"enableVtpm"` + EnableIntegrityMonitoring *bool `json:"enableIntegrityMonitoring"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + } else { + + r.EnableSecureBoot = res.EnableSecureBoot + + r.EnableVtpm = res.EnableVtpm + + r.EnableIntegrityMonitoring = res.EnableIntegrityMonitoring + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfig struct { + empty bool `json:"-"` + NumInstances *int64 `json:"numInstances"` + InstanceNames []string `json:"instanceNames"` + Image *string `json:"image"` + MachineType *string `json:"machineType"` + DiskConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig `json:"diskConfig"` + IsPreemptible *bool `json:"isPreemptible"` + Preemptibility *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum `json:"preemptibility"` + ManagedGroupConfig 
*WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig `json:"managedGroupConfig"` + Accelerators []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators `json:"accelerators"` + MinCpuPlatform *string `json:"minCpuPlatform"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig + } else { + + r.NumInstances = res.NumInstances + + r.InstanceNames = res.InstanceNames + + r.Image = res.Image + + r.MachineType = res.MachineType + + r.DiskConfig = res.DiskConfig + + r.IsPreemptible = res.IsPreemptible + + r.Preemptibility = res.Preemptibility + + r.ManagedGroupConfig = res.ManagedGroupConfig + + r.Accelerators = res.Accelerators + + r.MinCpuPlatform = res.MinCpuPlatform + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMasterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig struct { + empty bool `json:"-"` + BootDiskType *string `json:"bootDiskType"` + BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` + NumLocalSsds *int64 `json:"numLocalSsds"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + } else { + + r.BootDiskType = res.BootDiskType + + r.BootDiskSizeGb = res.BootDiskSizeGb + + r.NumLocalSsds = res.NumLocalSsds + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig struct { + empty bool `json:"-"` + InstanceTemplateName *string `json:"instanceTemplateName"` + InstanceGroupManagerName *string `json:"instanceGroupManagerName"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + } else { + + r.InstanceTemplateName = res.InstanceTemplateName + + r.InstanceGroupManagerName = res.InstanceGroupManagerName + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorType *string `json:"acceleratorType"` + AcceleratorCount *int64 `json:"acceleratorCount"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + } else { + + r.AcceleratorType = res.AcceleratorType + + r.AcceleratorCount = res.AcceleratorCount + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfig struct { + empty bool `json:"-"` + NumInstances *int64 `json:"numInstances"` + InstanceNames []string `json:"instanceNames"` + Image *string `json:"image"` + MachineType *string `json:"machineType"` + DiskConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig `json:"diskConfig"` + IsPreemptible *bool `json:"isPreemptible"` + Preemptibility *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum `json:"preemptibility"` + ManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig `json:"managedGroupConfig"` + Accelerators []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators `json:"accelerators"` + MinCpuPlatform *string `json:"minCpuPlatform"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfig + +func (r 
*WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig + } else { + + r.NumInstances = res.NumInstances + + r.InstanceNames = res.InstanceNames + + r.Image = res.Image + + r.MachineType = res.MachineType + + r.DiskConfig = res.DiskConfig + + r.IsPreemptible = res.IsPreemptible + + r.Preemptibility = res.Preemptibility + + r.ManagedGroupConfig = res.ManagedGroupConfig + + r.Accelerators = res.Accelerators + + r.MinCpuPlatform = res.MinCpuPlatform + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig struct { + empty bool `json:"-"` + BootDiskType *string `json:"bootDiskType"` + BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` + NumLocalSsds *int64 `json:"numLocalSsds"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + } else { + + r.BootDiskType = res.BootDiskType + + r.BootDiskSizeGb = res.BootDiskSizeGb + + r.NumLocalSsds = res.NumLocalSsds + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig struct { + empty bool `json:"-"` + InstanceTemplateName *string `json:"instanceTemplateName"` + InstanceGroupManagerName *string `json:"instanceGroupManagerName"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + } else { + + r.InstanceTemplateName = res.InstanceTemplateName + + r.InstanceGroupManagerName = res.InstanceGroupManagerName + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorType *string `json:"acceleratorType"` + AcceleratorCount *int64 `json:"acceleratorCount"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + } else { + + r.AcceleratorType = res.AcceleratorType + + r.AcceleratorCount = res.AcceleratorCount + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig struct { + empty bool `json:"-"` + NumInstances *int64 `json:"numInstances"` + InstanceNames []string `json:"instanceNames"` + Image *string `json:"image"` + MachineType *string `json:"machineType"` + DiskConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig `json:"diskConfig"` + IsPreemptible *bool `json:"isPreemptible"` + Preemptibility *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum `json:"preemptibility"` + ManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig `json:"managedGroupConfig"` + Accelerators []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators `json:"accelerators"` + MinCpuPlatform *string `json:"minCpuPlatform"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig 
WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + } else { + + r.NumInstances = res.NumInstances + + r.InstanceNames = res.InstanceNames + + r.Image = res.Image + + r.MachineType = res.MachineType + + r.DiskConfig = res.DiskConfig + + r.IsPreemptible = res.IsPreemptible + + r.Preemptibility = res.Preemptibility + + r.ManagedGroupConfig = res.ManagedGroupConfig + + r.Accelerators = res.Accelerators + + r.MinCpuPlatform = res.MinCpuPlatform + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig struct { + empty bool `json:"-"` + BootDiskType *string `json:"bootDiskType"` + BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` + NumLocalSsds *int64 `json:"numLocalSsds"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + } else { + + r.BootDiskType = res.BootDiskType + + r.BootDiskSizeGb = res.BootDiskSizeGb + + r.NumLocalSsds = res.NumLocalSsds + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig struct { + empty bool `json:"-"` + InstanceTemplateName *string `json:"instanceTemplateName"` + InstanceGroupManagerName *string `json:"instanceGroupManagerName"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + } else { + + r.InstanceTemplateName = res.InstanceTemplateName + + r.InstanceGroupManagerName = 
res.InstanceGroupManagerName + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorType *string `json:"acceleratorType"` + AcceleratorCount *int64 `json:"acceleratorCount"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + 
*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + } else { + + r.AcceleratorType = res.AcceleratorType + + r.AcceleratorCount = res.AcceleratorCount + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig struct { + empty bool `json:"-"` + ImageVersion *string `json:"imageVersion"` + Properties map[string]string `json:"properties"` + OptionalComponents []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum `json:"optionalComponents"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + 
if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + } else { + + r.ImageVersion = res.ImageVersion + + r.Properties = res.Properties + + r.OptionalComponents = res.OptionalComponents + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig = &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigInitializationActions struct { + empty bool `json:"-"` + ExecutableFile *string `json:"executableFile"` + ExecutionTimeout *string `json:"executionTimeout"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigInitializationActions WorkflowTemplatePlacementManagedClusterConfigInitializationActions + +func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigInitializationActions + if err := json.Unmarshal(data, &res); err != nil { + return 
err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions + } else { + + r.ExecutableFile = res.ExecutableFile + + r.ExecutionTimeout = res.ExecutionTimeout + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigInitializationActions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions *WorkflowTemplatePlacementManagedClusterConfigInitializationActions = &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig struct { + empty bool `json:"-"` + GcePdKmsKeyName *string `json:"gcePdKmsKeyName"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + } else { + + r.GcePdKmsKeyName = res.GcePdKmsKeyName + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig = &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig struct { + empty bool `json:"-"` + Policy *string `json:"policy"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + } else { + + r.Policy = res.Policy + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig = &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSecurityConfig struct { + empty bool `json:"-"` + KerberosConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig `json:"kerberosConfig"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig + } else { + + r.KerberosConfig = res.KerberosConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecurityConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig struct { + empty bool `json:"-"` + EnableKerberos *bool `json:"enableKerberos"` + RootPrincipalPassword *string `json:"rootPrincipalPassword"` + KmsKey *string `json:"kmsKey"` + Keystore *string `json:"keystore"` + Truststore *string `json:"truststore"` + KeystorePassword *string `json:"keystorePassword"` + KeyPassword *string `json:"keyPassword"` + TruststorePassword *string `json:"truststorePassword"` + CrossRealmTrustRealm *string `json:"crossRealmTrustRealm"` + CrossRealmTrustKdc *string `json:"crossRealmTrustKdc"` + CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer"` + CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword"` + KdcDbKey *string `json:"kdcDbKey"` + TgtLifetimeHours *int64 `json:"tgtLifetimeHours"` + Realm *string `json:"realm"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) 
UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + } else { + + r.EnableKerberos = res.EnableKerberos + + r.RootPrincipalPassword = res.RootPrincipalPassword + + r.KmsKey = res.KmsKey + + r.Keystore = res.Keystore + + r.Truststore = res.Truststore + + r.KeystorePassword = res.KeystorePassword + + r.KeyPassword = res.KeyPassword + + r.TruststorePassword = res.TruststorePassword + + r.CrossRealmTrustRealm = res.CrossRealmTrustRealm + + r.CrossRealmTrustKdc = res.CrossRealmTrustKdc + + r.CrossRealmTrustAdminServer = res.CrossRealmTrustAdminServer + + r.CrossRealmTrustSharedPassword = res.CrossRealmTrustSharedPassword + + r.KdcDbKey = res.KdcDbKey + + r.TgtLifetimeHours = res.TgtLifetimeHours + + r.Realm = res.Realm + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig struct { + empty bool `json:"-"` + IdleDeleteTtl *string `json:"idleDeleteTtl"` + AutoDeleteTime *string `json:"autoDeleteTime"` + AutoDeleteTtl *string `json:"autoDeleteTtl"` + IdleStartTime *string `json:"idleStartTime"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + } else { + + r.IdleDeleteTtl = res.IdleDeleteTtl + + r.AutoDeleteTime = res.AutoDeleteTime + + r.AutoDeleteTtl = res.AutoDeleteTtl + + r.IdleStartTime = res.IdleStartTime + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig = &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigEndpointConfig struct { + empty bool `json:"-"` + HttpPorts map[string]string `json:"httpPorts"` + EnableHttpPortAccess *bool `json:"enableHttpPortAccess"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigEndpointConfig WorkflowTemplatePlacementManagedClusterConfigEndpointConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigEndpointConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig + } else { + + r.HttpPorts = res.HttpPorts + + r.EnableHttpPortAccess = res.EnableHttpPortAccess + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigEndpointConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig = &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- if ne $.TargetVersionName "ga" }} +type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig struct { + empty bool `json:"-"` + NamespacedGkeDeploymentTarget *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget `json:"namespacedGkeDeploymentTarget"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + } else { + + r.NamespacedGkeDeploymentTarget = res.NamespacedGkeDeploymentTarget + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget struct { + empty bool `json:"-"` + TargetGkeCluster *string `json:"targetGkeCluster"` + ClusterNamespace *string `json:"clusterNamespace"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + } else { + + r.TargetGkeCluster = res.TargetGkeCluster + + r.ClusterNamespace = res.ClusterNamespace + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig struct { + empty bool `json:"-"` + DataprocMetastoreService *string `json:"dataprocMetastoreService"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + } else { + + r.DataprocMetastoreService = res.DataprocMetastoreService + + } + return nil +} + +// This object is used to 
assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig = &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- end }} +type WorkflowTemplatePlacementClusterSelector struct { + empty bool `json:"-"` + Zone *string `json:"zone"` + ClusterLabels map[string]string `json:"clusterLabels"` +} + +type jsonWorkflowTemplatePlacementClusterSelector WorkflowTemplatePlacementClusterSelector + +func (r *WorkflowTemplatePlacementClusterSelector) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementClusterSelector + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementClusterSelector + } else { + + r.Zone = res.Zone + + r.ClusterLabels = res.ClusterLabels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementClusterSelector is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementClusterSelector *WorkflowTemplatePlacementClusterSelector = &WorkflowTemplatePlacementClusterSelector{empty: true} + +func (r *WorkflowTemplatePlacementClusterSelector) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementClusterSelector) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementClusterSelector) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobs struct { + empty bool `json:"-"` + StepId *string `json:"stepId"` + HadoopJob *WorkflowTemplateJobsHadoopJob `json:"hadoopJob"` + SparkJob *WorkflowTemplateJobsSparkJob `json:"sparkJob"` + PysparkJob *WorkflowTemplateJobsPysparkJob `json:"pysparkJob"` + HiveJob *WorkflowTemplateJobsHiveJob `json:"hiveJob"` + PigJob *WorkflowTemplateJobsPigJob `json:"pigJob"` + SparkRJob *WorkflowTemplateJobsSparkRJob `json:"sparkRJob"` + SparkSqlJob *WorkflowTemplateJobsSparkSqlJob `json:"sparkSqlJob"` + PrestoJob *WorkflowTemplateJobsPrestoJob `json:"prestoJob"` + Labels map[string]string `json:"labels"` + Scheduling *WorkflowTemplateJobsScheduling `json:"scheduling"` + PrerequisiteStepIds []string `json:"prerequisiteStepIds"` +} + +type jsonWorkflowTemplateJobs WorkflowTemplateJobs + +func (r *WorkflowTemplateJobs) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobs + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobs + } else { + + r.StepId = res.StepId + + r.HadoopJob = res.HadoopJob + + r.SparkJob = res.SparkJob + + r.PysparkJob = res.PysparkJob + + r.HiveJob = res.HiveJob + + r.PigJob = res.PigJob + + r.SparkRJob = res.SparkRJob + + r.SparkSqlJob = res.SparkSqlJob + + r.PrestoJob = 
res.PrestoJob + + r.Labels = res.Labels + + r.Scheduling = res.Scheduling + + r.PrerequisiteStepIds = res.PrerequisiteStepIds + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobs *WorkflowTemplateJobs = &WorkflowTemplateJobs{empty: true} + +func (r *WorkflowTemplateJobs) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobs) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsHadoopJob struct { + empty bool `json:"-"` + MainJarFileUri *string `json:"mainJarFileUri"` + MainClass *string `json:"mainClass"` + Args []string `json:"args"` + JarFileUris []string `json:"jarFileUris"` + FileUris []string `json:"fileUris"` + ArchiveUris []string `json:"archiveUris"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsHadoopJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsHadoopJob WorkflowTemplateJobsHadoopJob + +func (r *WorkflowTemplateJobsHadoopJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsHadoopJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsHadoopJob + } else { + + r.MainJarFileUri = res.MainJarFileUri + + r.MainClass = res.MainClass + + r.Args = res.Args + + r.JarFileUris = res.JarFileUris + + r.FileUris = res.FileUris + + r.ArchiveUris = res.ArchiveUris + + r.Properties = res.Properties + + r.LoggingConfig = 
res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsHadoopJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobsHadoopJob *WorkflowTemplateJobsHadoopJob = &WorkflowTemplateJobsHadoopJob{empty: true} + +func (r *WorkflowTemplateJobsHadoopJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsHadoopJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsHadoopJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsHadoopJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsHadoopJobLoggingConfig WorkflowTemplateJobsHadoopJobLoggingConfig + +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsHadoopJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsHadoopJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsHadoopJobLoggingConfig *WorkflowTemplateJobsHadoopJobLoggingConfig = &WorkflowTemplateJobsHadoopJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkJob struct { + empty bool `json:"-"` + MainJarFileUri *string `json:"mainJarFileUri"` + MainClass *string `json:"mainClass"` + Args []string `json:"args"` + JarFileUris []string `json:"jarFileUris"` + FileUris []string `json:"fileUris"` + ArchiveUris []string `json:"archiveUris"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsSparkJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsSparkJob WorkflowTemplateJobsSparkJob + +func (r *WorkflowTemplateJobsSparkJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkJob + } else { + + r.MainJarFileUri = res.MainJarFileUri + + r.MainClass = res.MainClass + + r.Args = res.Args + + r.JarFileUris = res.JarFileUris + + r.FileUris = res.FileUris + + r.ArchiveUris = res.ArchiveUris + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobsSparkJob *WorkflowTemplateJobsSparkJob = &WorkflowTemplateJobsSparkJob{empty: true} + +func (r *WorkflowTemplateJobsSparkJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsSparkJobLoggingConfig WorkflowTemplateJobsSparkJobLoggingConfig + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkJobLoggingConfig *WorkflowTemplateJobsSparkJobLoggingConfig = &WorkflowTemplateJobsSparkJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPysparkJob struct { + empty bool `json:"-"` + MainPythonFileUri *string `json:"mainPythonFileUri"` + Args []string `json:"args"` + PythonFileUris []string `json:"pythonFileUris"` + JarFileUris []string `json:"jarFileUris"` + FileUris []string `json:"fileUris"` + ArchiveUris []string `json:"archiveUris"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsPysparkJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsPysparkJob WorkflowTemplateJobsPysparkJob + +func (r *WorkflowTemplateJobsPysparkJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPysparkJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPysparkJob + } else { + + r.MainPythonFileUri = res.MainPythonFileUri + + r.Args = res.Args + + r.PythonFileUris = res.PythonFileUris + + r.JarFileUris = res.JarFileUris + + r.FileUris = res.FileUris + + r.ArchiveUris = res.ArchiveUris + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPysparkJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobsPysparkJob *WorkflowTemplateJobsPysparkJob = &WorkflowTemplateJobsPysparkJob{empty: true} + +func (r *WorkflowTemplateJobsPysparkJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPysparkJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPysparkJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPysparkJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsPysparkJobLoggingConfig WorkflowTemplateJobsPysparkJobLoggingConfig + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPysparkJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPysparkJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPysparkJobLoggingConfig *WorkflowTemplateJobsPysparkJobLoggingConfig = &WorkflowTemplateJobsPysparkJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsHiveJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsHiveJobQueryList `json:"queryList"` + ContinueOnFailure *bool `json:"continueOnFailure"` + ScriptVariables map[string]string `json:"scriptVariables"` + Properties map[string]string `json:"properties"` + JarFileUris []string `json:"jarFileUris"` +} + +type jsonWorkflowTemplateJobsHiveJob WorkflowTemplateJobsHiveJob + +func (r *WorkflowTemplateJobsHiveJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsHiveJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsHiveJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ContinueOnFailure = res.ContinueOnFailure + + r.ScriptVariables = res.ScriptVariables + + r.Properties = res.Properties + + r.JarFileUris = res.JarFileUris + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsHiveJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsHiveJob *WorkflowTemplateJobsHiveJob = &WorkflowTemplateJobsHiveJob{empty: true} + +func (r *WorkflowTemplateJobsHiveJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsHiveJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsHiveJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsHiveJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsHiveJobQueryList WorkflowTemplateJobsHiveJobQueryList + +func (r *WorkflowTemplateJobsHiveJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsHiveJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsHiveJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsHiveJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsHiveJobQueryList *WorkflowTemplateJobsHiveJobQueryList = &WorkflowTemplateJobsHiveJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsHiveJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsHiveJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsHiveJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPigJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsPigJobQueryList `json:"queryList"` + ContinueOnFailure *bool `json:"continueOnFailure"` + ScriptVariables map[string]string `json:"scriptVariables"` + Properties map[string]string `json:"properties"` + JarFileUris []string `json:"jarFileUris"` + LoggingConfig *WorkflowTemplateJobsPigJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsPigJob WorkflowTemplateJobsPigJob + +func (r *WorkflowTemplateJobsPigJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPigJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPigJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ContinueOnFailure = res.ContinueOnFailure + + r.ScriptVariables = res.ScriptVariables + + r.Properties = res.Properties + + r.JarFileUris = res.JarFileUris + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPigJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPigJob *WorkflowTemplateJobsPigJob = &WorkflowTemplateJobsPigJob{empty: true} + +func (r *WorkflowTemplateJobsPigJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPigJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPigJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPigJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsPigJobQueryList WorkflowTemplateJobsPigJobQueryList + +func (r *WorkflowTemplateJobsPigJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPigJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPigJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPigJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPigJobQueryList *WorkflowTemplateJobsPigJobQueryList = &WorkflowTemplateJobsPigJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsPigJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPigJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPigJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPigJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsPigJobLoggingConfig WorkflowTemplateJobsPigJobLoggingConfig + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPigJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPigJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPigJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPigJobLoggingConfig *WorkflowTemplateJobsPigJobLoggingConfig = &WorkflowTemplateJobsPigJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkRJob struct { + empty bool `json:"-"` + MainRFileUri *string `json:"mainRFileUri"` + Args []string `json:"args"` + FileUris []string `json:"fileUris"` + ArchiveUris []string `json:"archiveUris"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsSparkRJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsSparkRJob WorkflowTemplateJobsSparkRJob + +func (r *WorkflowTemplateJobsSparkRJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkRJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkRJob + } else { + + r.MainRFileUri = res.MainRFileUri + + r.Args = res.Args + + r.FileUris = res.FileUris + + r.ArchiveUris = res.ArchiveUris + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkRJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkRJob *WorkflowTemplateJobsSparkRJob = &WorkflowTemplateJobsSparkRJob{empty: true} + +func (r *WorkflowTemplateJobsSparkRJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkRJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkRJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkRJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsSparkRJobLoggingConfig WorkflowTemplateJobsSparkRJobLoggingConfig + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkRJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkRJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkRJobLoggingConfig *WorkflowTemplateJobsSparkRJobLoggingConfig = &WorkflowTemplateJobsSparkRJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkSqlJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsSparkSqlJobQueryList `json:"queryList"` + ScriptVariables map[string]string `json:"scriptVariables"` + Properties map[string]string `json:"properties"` + JarFileUris []string `json:"jarFileUris"` + LoggingConfig *WorkflowTemplateJobsSparkSqlJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsSparkSqlJob WorkflowTemplateJobsSparkSqlJob + +func (r *WorkflowTemplateJobsSparkSqlJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkSqlJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkSqlJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ScriptVariables = res.ScriptVariables + + r.Properties = res.Properties + + r.JarFileUris = res.JarFileUris + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkSqlJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkSqlJob *WorkflowTemplateJobsSparkSqlJob = &WorkflowTemplateJobsSparkSqlJob{empty: true} + +func (r *WorkflowTemplateJobsSparkSqlJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkSqlJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkSqlJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkSqlJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsSparkSqlJobQueryList WorkflowTemplateJobsSparkSqlJobQueryList + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkSqlJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkSqlJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkSqlJobQueryList *WorkflowTemplateJobsSparkSqlJobQueryList = &WorkflowTemplateJobsSparkSqlJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkSqlJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsSparkSqlJobLoggingConfig WorkflowTemplateJobsSparkSqlJobLoggingConfig + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkSqlJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkSqlJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig *WorkflowTemplateJobsSparkSqlJobLoggingConfig = &WorkflowTemplateJobsSparkSqlJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPrestoJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsPrestoJobQueryList `json:"queryList"` + ContinueOnFailure *bool `json:"continueOnFailure"` + OutputFormat *string `json:"outputFormat"` + ClientTags []string `json:"clientTags"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsPrestoJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsPrestoJob WorkflowTemplateJobsPrestoJob + +func (r *WorkflowTemplateJobsPrestoJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPrestoJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPrestoJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ContinueOnFailure = res.ContinueOnFailure + + r.OutputFormat = res.OutputFormat + + r.ClientTags = res.ClientTags + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPrestoJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobsPrestoJob *WorkflowTemplateJobsPrestoJob = &WorkflowTemplateJobsPrestoJob{empty: true} + +func (r *WorkflowTemplateJobsPrestoJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPrestoJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPrestoJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPrestoJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsPrestoJobQueryList WorkflowTemplateJobsPrestoJobQueryList + +func (r *WorkflowTemplateJobsPrestoJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPrestoJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPrestoJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPrestoJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPrestoJobQueryList *WorkflowTemplateJobsPrestoJobQueryList = &WorkflowTemplateJobsPrestoJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsPrestoJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPrestoJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPrestoJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPrestoJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsPrestoJobLoggingConfig WorkflowTemplateJobsPrestoJobLoggingConfig + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPrestoJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPrestoJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPrestoJobLoggingConfig *WorkflowTemplateJobsPrestoJobLoggingConfig = &WorkflowTemplateJobsPrestoJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsScheduling struct { + empty bool `json:"-"` + MaxFailuresPerHour *int64 `json:"maxFailuresPerHour"` + MaxFailuresTotal *int64 `json:"maxFailuresTotal"` +} + +type jsonWorkflowTemplateJobsScheduling WorkflowTemplateJobsScheduling + +func (r *WorkflowTemplateJobsScheduling) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsScheduling + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsScheduling + } else { + + r.MaxFailuresPerHour = res.MaxFailuresPerHour + + r.MaxFailuresTotal = res.MaxFailuresTotal + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsScheduling is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsScheduling *WorkflowTemplateJobsScheduling = &WorkflowTemplateJobsScheduling{empty: true} + +func (r *WorkflowTemplateJobsScheduling) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsScheduling) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsScheduling) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParameters struct { + empty bool `json:"-"` + Name *string `json:"name"` + Fields []string `json:"fields"` + Description *string `json:"description"` + Validation *WorkflowTemplateParametersValidation `json:"validation"` +} + +type jsonWorkflowTemplateParameters WorkflowTemplateParameters + +func (r *WorkflowTemplateParameters) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParameters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParameters + } else { + + r.Name = res.Name + + r.Fields = res.Fields + + r.Description = res.Description + + r.Validation = res.Validation + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParameters is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParameters *WorkflowTemplateParameters = &WorkflowTemplateParameters{empty: true} + +func (r *WorkflowTemplateParameters) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParameters) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParameters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParametersValidation struct { + empty bool `json:"-"` + Regex *WorkflowTemplateParametersValidationRegex `json:"regex"` + Values *WorkflowTemplateParametersValidationValues `json:"values"` +} + +type jsonWorkflowTemplateParametersValidation WorkflowTemplateParametersValidation + +func (r *WorkflowTemplateParametersValidation) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParametersValidation + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParametersValidation + } else { + + r.Regex = res.Regex + + r.Values = res.Values + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParametersValidation is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParametersValidation *WorkflowTemplateParametersValidation = &WorkflowTemplateParametersValidation{empty: true} + +func (r *WorkflowTemplateParametersValidation) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParametersValidation) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParametersValidation) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParametersValidationRegex struct { + empty bool `json:"-"` + Regexes []string `json:"regexes"` +} + +type jsonWorkflowTemplateParametersValidationRegex WorkflowTemplateParametersValidationRegex + +func (r *WorkflowTemplateParametersValidationRegex) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParametersValidationRegex + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParametersValidationRegex + } else { + + r.Regexes = res.Regexes + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParametersValidationRegex is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParametersValidationRegex *WorkflowTemplateParametersValidationRegex = &WorkflowTemplateParametersValidationRegex{empty: true} + +func (r *WorkflowTemplateParametersValidationRegex) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParametersValidationRegex) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParametersValidationRegex) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParametersValidationValues struct { + empty bool `json:"-"` + Values []string `json:"values"` +} + +type jsonWorkflowTemplateParametersValidationValues WorkflowTemplateParametersValidationValues + +func (r *WorkflowTemplateParametersValidationValues) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParametersValidationValues + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParametersValidationValues + } else { + + r.Values = res.Values + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParametersValidationValues is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParametersValidationValues *WorkflowTemplateParametersValidationValues = &WorkflowTemplateParametersValidationValues{empty: true} + +func (r *WorkflowTemplateParametersValidationValues) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParametersValidationValues) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParametersValidationValues) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *WorkflowTemplate) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataproc", + Type: "WorkflowTemplate", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "dataproc", +{{- end }} + } +} + +func (r *WorkflowTemplate) ID() (string, error) { + if err := extractWorkflowTemplateFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "version": dcl.ValueOrEmptyString(nr.Version), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "encryption_config": dcl.ValueOrEmptyString(nr.EncryptionConfig), + "placement": dcl.ValueOrEmptyString(nr.Placement), + "jobs": dcl.ValueOrEmptyString(nr.Jobs), + "parameters": dcl.ValueOrEmptyString(nr.Parameters), + "dag_timeout": dcl.ValueOrEmptyString(nr.DagTimeout), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", params), nil +} + 
+const WorkflowTemplateMaxPage = -1 + +type WorkflowTemplateList struct { + Items []*WorkflowTemplate + + nextToken string + + pageSize int32 + + resource *WorkflowTemplate +} + +func (l *WorkflowTemplateList) HasNext() bool { + return l.nextToken != "" +} + +func (l *WorkflowTemplateList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listWorkflowTemplate(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListWorkflowTemplate(ctx context.Context, project, location string) (*WorkflowTemplateList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListWorkflowTemplateWithMaxResults(ctx, project, location, WorkflowTemplateMaxPage) + +} + +func (c *Client) ListWorkflowTemplateWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*WorkflowTemplateList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &WorkflowTemplate{ + Project: &project, + Location: &location, + } + items, token, err := c.listWorkflowTemplate(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &WorkflowTemplateList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetWorkflowTemplate(ctx context.Context, r *WorkflowTemplate) (*WorkflowTemplate, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. 
+ // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractWorkflowTemplateFields(r) + + b, err := c.getWorkflowTemplateRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalWorkflowTemplate(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeWorkflowTemplateNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractWorkflowTemplateFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteWorkflowTemplate(ctx context.Context, r *WorkflowTemplate) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("WorkflowTemplate resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting WorkflowTemplate...") + deleteOp := deleteWorkflowTemplateOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllWorkflowTemplate deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllWorkflowTemplate(ctx context.Context, project, location string, filter func(*WorkflowTemplate) bool) error { + listObj, err := c.ListWorkflowTemplate(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllWorkflowTemplate(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllWorkflowTemplate(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyWorkflowTemplate(ctx context.Context, rawDesired *WorkflowTemplate, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *WorkflowTemplate + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyWorkflowTemplateHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyWorkflowTemplateHelper(c *Client, ctx context.Context, rawDesired *WorkflowTemplate, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyWorkflowTemplate...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractWorkflowTemplateFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.workflowTemplateDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToWorkflowTemplateDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []workflowTemplateApiOperation + if create { + ops = append(ops, &createWorkflowTemplateOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", 
op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyWorkflowTemplateDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyWorkflowTemplateDiff(c *Client, ctx context.Context, desired *WorkflowTemplate, rawDesired *WorkflowTemplate, ops []workflowTemplateApiOperation, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetWorkflowTemplate(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createWorkflowTemplateOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapWorkflowTemplate(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeWorkflowTemplateNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeWorkflowTemplateNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. 
+ // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeWorkflowTemplateDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractWorkflowTemplateFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. + if err := postReadExtractWorkflowTemplateFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffWorkflowTemplate(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/workflow_template_internal.go.tmpl b/mmv1/third_party/terraform/services/dataproc/workflow_template_internal.go.tmpl new file mode 100644 index 000000000000..3faedcb84a9d --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/workflow_template_internal.go.tmpl @@ -0,0 +1,20443 @@ +package dataproc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *WorkflowTemplate) validate() error { + + if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { + return err + } + if err := dcl.Required(r, "placement"); err != nil { + return err + } + if err := dcl.Required(r, "jobs"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, 
"Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.EncryptionConfig) { + if err := r.EncryptionConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Placement) { + if err := r.Placement.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateEncryptionConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacement) validate() error { + if !dcl.IsEmptyValueIndirect(r.ManagedCluster) { + if err := r.ManagedCluster.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ClusterSelector) { + if err := r.ClusterSelector.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedCluster) validate() error { + if err := dcl.Required(r, "clusterName"); err != nil { + return err + } + if err := dcl.Required(r, "config"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.GceClusterConfig) { + if err := r.GceClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MasterConfig) { + if err := r.MasterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecondaryWorkerConfig) { + if err := r.SecondaryWorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SoftwareConfig) { + if err := r.SoftwareConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.EncryptionConfig) { + if err := r.EncryptionConfig.validate(); err != nil { + return err + } + } + if 
!dcl.IsEmptyValueIndirect(r.AutoscalingConfig) { + if err := r.AutoscalingConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecurityConfig) { + if err := r.SecurityConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LifecycleConfig) { + if err := r.LifecycleConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.EndpointConfig) { + if err := r.EndpointConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.GkeClusterConfig) { + if err := r.GkeClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MetastoreConfig) { + if err := r.MetastoreConfig.validate(); err != nil { + return err + } + } +{{- end }} + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ReservationAffinity) { + if err := r.ReservationAffinity.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.NodeGroupAffinity) { + if err := r.NodeGroupAffinity.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ShieldedInstanceConfig) { + if err := r.ShieldedInstanceConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) validate() error { + if err := dcl.Required(r, "nodeGroup"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return 
err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) validate() error { + return nil +} +func (r 
*WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.KerberosConfig) { + if err := r.KerberosConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) validate() error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.NamespacedGkeDeploymentTarget) { + if err := r.NamespacedGkeDeploymentTarget.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) validate() error { + if err := dcl.Required(r, "dataprocMetastoreService"); err != nil { + return err + } + return nil +} +{{- end }} +func (r *WorkflowTemplatePlacementClusterSelector) validate() error { + if err := dcl.Required(r, "clusterLabels"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobs) validate() error { + if err := dcl.Required(r, "stepId"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.HadoopJob) { + if err := 
r.HadoopJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SparkJob) { + if err := r.SparkJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PysparkJob) { + if err := r.PysparkJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.HiveJob) { + if err := r.HiveJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PigJob) { + if err := r.PigJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SparkRJob) { + if err := r.SparkRJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SparkSqlJob) { + if err := r.SparkSqlJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PrestoJob) { + if err := r.PrestoJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Scheduling) { + if err := r.Scheduling.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsHadoopJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsSparkJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsPysparkJob) validate() error { + if err := dcl.Required(r, "mainPythonFileUri"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsHiveJob) validate() 
error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsHiveJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsPigJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsPigJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsPigJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsSparkRJob) validate() error { + if err := dcl.Required(r, "mainRFileUri"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsSparkSqlJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsPrestoJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { 
+ if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsPrestoJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsScheduling) validate() error { + return nil +} +func (r *WorkflowTemplateParameters) validate() error { + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "fields"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Validation) { + if err := r.Validation.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateParametersValidation) validate() error { + if !dcl.IsEmptyValueIndirect(r.Regex) { + if err := r.Regex.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Values) { + if err := r.Values.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateParametersValidationRegex) validate() error { + if err := dcl.Required(r, "regexes"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateParametersValidationValues) validate() error { + if err := dcl.Required(r, "values"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplate) basePath() string { + params := map[string]interface{}{} +{{- if ne $.TargetVersionName "ga" }} + return dcl.Nprintf("https://dataproc.googleapis.com/v1beta2/", params) +{{- else }} + return dcl.Nprintf("https://dataproc.googleapis.com/v1/", params) +{{- end }} +} + +func (r *WorkflowTemplate) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" 
}}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *WorkflowTemplate) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkflowTemplate) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkflowTemplate) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// workflowTemplateApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type workflowTemplateApiOperation interface { + do(context.Context, *WorkflowTemplate, *Client) error +} + +func (c *Client) listWorkflowTemplateRaw(ctx context.Context, r *WorkflowTemplate, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != WorkflowTemplateMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listWorkflowTemplateOperation struct { + Templates []map[string]interface{} `json:"templates"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listWorkflowTemplate(ctx context.Context, r *WorkflowTemplate, pageToken string, pageSize int32) ([]*WorkflowTemplate, string, error) { + b, err := c.listWorkflowTemplateRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listWorkflowTemplateOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*WorkflowTemplate + for _, v := range m.Templates { + res, err := unmarshalMapWorkflowTemplate(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllWorkflowTemplate(ctx context.Context, f func(*WorkflowTemplate) bool, resources []*WorkflowTemplate) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteWorkflowTemplate(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteWorkflowTemplateOperation struct{} + +func (op *deleteWorkflowTemplateOperation) do(ctx context.Context, r *WorkflowTemplate, c *Client) error { + r, err := c.GetWorkflowTemplate(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "WorkflowTemplate not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetWorkflowTemplate checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete WorkflowTemplate: %w", err) + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + // Capture the retry loop's result so that dcl.NotDeletedError (resource still + // present after all retries) is surfaced to the caller instead of discarded. + err = dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetWorkflowTemplate(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return err +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body.
+type createWorkflowTemplateOperation struct { + response map[string]interface{} +} + +func (op *createWorkflowTemplateOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createWorkflowTemplateOperation) do(ctx context.Context, r *WorkflowTemplate, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + var m map[string]interface{} + if err := json.Unmarshal(req, &m); err != nil { + return err + } + normalized := r.urlNormalized() + m["id"] = fmt.Sprintf("%s", *normalized.Name) + + req, err = json.Marshal(m) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + if _, err := c.GetWorkflowTemplate(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getWorkflowTemplateRaw(ctx context.Context, r *WorkflowTemplate) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) workflowTemplateDiffsForRawDesired(ctx context.Context, rawDesired *WorkflowTemplate, opts ...dcl.ApplyOption) (initial, desired *WorkflowTemplate, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // 
First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *WorkflowTemplate + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*WorkflowTemplate); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected WorkflowTemplate, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetWorkflowTemplate(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a WorkflowTemplate resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve WorkflowTemplate resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that WorkflowTemplate resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeWorkflowTemplateDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for WorkflowTemplate: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for WorkflowTemplate: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractWorkflowTemplateFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeWorkflowTemplateInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for WorkflowTemplate: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeWorkflowTemplateDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for WorkflowTemplate: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffWorkflowTemplate(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeWorkflowTemplateInitialState(rawInitial, rawDesired *WorkflowTemplate) (*WorkflowTemplate, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeWorkflowTemplateDesiredState(rawDesired, rawInitial *WorkflowTemplate, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.EncryptionConfig = canonicalizeWorkflowTemplateEncryptionConfig(rawDesired.EncryptionConfig, nil, opts...) + rawDesired.Placement = canonicalizeWorkflowTemplatePlacement(rawDesired.Placement, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &WorkflowTemplate{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + canonicalDesired.EncryptionConfig = canonicalizeWorkflowTemplateEncryptionConfig(rawDesired.EncryptionConfig, rawInitial.EncryptionConfig, opts...) + canonicalDesired.Placement = canonicalizeWorkflowTemplatePlacement(rawDesired.Placement, rawInitial.Placement, opts...) + canonicalDesired.Jobs = canonicalizeWorkflowTemplateJobsSlice(rawDesired.Jobs, rawInitial.Jobs, opts...) + canonicalDesired.Parameters = canonicalizeWorkflowTemplateParametersSlice(rawDesired.Parameters, rawInitial.Parameters, opts...) + if dcl.StringCanonicalize(rawDesired.DagTimeout, rawInitial.DagTimeout) { + canonicalDesired.DagTimeout = rawInitial.DagTimeout + } else { + canonicalDesired.DagTimeout = rawDesired.DagTimeout + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeWorkflowTemplateNewState(c *Client, rawNew, rawDesired *WorkflowTemplate) (*WorkflowTemplate, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.Version) && dcl.IsEmptyValueIndirect(rawDesired.Version) { + rawNew.Version = rawDesired.Version + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if 
dcl.IsEmptyValueIndirect(rawNew.EncryptionConfig) && dcl.IsEmptyValueIndirect(rawDesired.EncryptionConfig) { + rawNew.EncryptionConfig = rawDesired.EncryptionConfig + } else { + rawNew.EncryptionConfig = canonicalizeNewWorkflowTemplateEncryptionConfig(c, rawDesired.EncryptionConfig, rawNew.EncryptionConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.Placement) && dcl.IsEmptyValueIndirect(rawDesired.Placement) { + rawNew.Placement = rawDesired.Placement + } else { + rawNew.Placement = canonicalizeNewWorkflowTemplatePlacement(c, rawDesired.Placement, rawNew.Placement) + } + + if dcl.IsEmptyValueIndirect(rawNew.Jobs) && dcl.IsEmptyValueIndirect(rawDesired.Jobs) { + rawNew.Jobs = rawDesired.Jobs + } else { + rawNew.Jobs = canonicalizeNewWorkflowTemplateJobsSlice(c, rawDesired.Jobs, rawNew.Jobs) + } + + if dcl.IsEmptyValueIndirect(rawNew.Parameters) && dcl.IsEmptyValueIndirect(rawDesired.Parameters) { + rawNew.Parameters = rawDesired.Parameters + } else { + rawNew.Parameters = canonicalizeNewWorkflowTemplateParametersSlice(c, rawDesired.Parameters, rawNew.Parameters) + } + + if dcl.IsEmptyValueIndirect(rawNew.DagTimeout) && dcl.IsEmptyValueIndirect(rawDesired.DagTimeout) { + rawNew.DagTimeout = rawDesired.DagTimeout + } else { + if dcl.StringCanonicalize(rawDesired.DagTimeout, rawNew.DagTimeout) { + rawNew.DagTimeout = rawDesired.DagTimeout + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeWorkflowTemplateEncryptionConfig(des, initial *WorkflowTemplateEncryptionConfig, opts ...dcl.ApplyOption) *WorkflowTemplateEncryptionConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateEncryptionConfig{} + + if dcl.IsZeroValue(des.KmsKey) || (dcl.IsEmptyValueIndirect(des.KmsKey) && dcl.IsEmptyValueIndirect(initial.KmsKey)) { + // Desired and initial values are equivalent, so set canonical desired value 
to initial value. + cDes.KmsKey = initial.KmsKey + } else { + cDes.KmsKey = des.KmsKey + } + + return cDes +} + +func canonicalizeWorkflowTemplateEncryptionConfigSlice(des, initial []WorkflowTemplateEncryptionConfig, opts ...dcl.ApplyOption) []WorkflowTemplateEncryptionConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateEncryptionConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateEncryptionConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateEncryptionConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateEncryptionConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateEncryptionConfig(c *Client, des, nw *WorkflowTemplateEncryptionConfig) *WorkflowTemplateEncryptionConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateEncryptionConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateEncryptionConfigSet(c *Client, des, nw []WorkflowTemplateEncryptionConfig) []WorkflowTemplateEncryptionConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplateEncryptionConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateEncryptionConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateEncryptionConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateEncryptionConfigSlice(c *Client, des, nw []WorkflowTemplateEncryptionConfig) []WorkflowTemplateEncryptionConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateEncryptionConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateEncryptionConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacement(des, initial *WorkflowTemplatePlacement, opts ...dcl.ApplyOption) *WorkflowTemplatePlacement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacement{} + + cDes.ManagedCluster = canonicalizeWorkflowTemplatePlacementManagedCluster(des.ManagedCluster, initial.ManagedCluster, opts...) + cDes.ClusterSelector = canonicalizeWorkflowTemplatePlacementClusterSelector(des.ClusterSelector, initial.ClusterSelector, opts...) 
+ + return cDes +} + +func canonicalizeWorkflowTemplatePlacementSlice(des, initial []WorkflowTemplatePlacement, opts ...dcl.ApplyOption) []WorkflowTemplatePlacement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacement, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacement, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacement(c *Client, des, nw *WorkflowTemplatePlacement) *WorkflowTemplatePlacement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ManagedCluster = canonicalizeNewWorkflowTemplatePlacementManagedCluster(c, des.ManagedCluster, nw.ManagedCluster) + nw.ClusterSelector = canonicalizeNewWorkflowTemplatePlacementClusterSelector(c, des.ClusterSelector, nw.ClusterSelector) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementSet(c *Client, des, nw []WorkflowTemplatePlacement) []WorkflowTemplatePlacement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementSlice(c *Client, des, nw []WorkflowTemplatePlacement) []WorkflowTemplatePlacement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacement(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedCluster(des, initial *WorkflowTemplatePlacementManagedCluster, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedCluster { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedCluster{} + + if dcl.StringCanonicalize(des.ClusterName, initial.ClusterName) || dcl.IsZeroValue(des.ClusterName) { + cDes.ClusterName = initial.ClusterName + } else { + cDes.ClusterName = des.ClusterName + } + cDes.Config = canonicalizeWorkflowTemplatePlacementManagedClusterConfig(des.Config, initial.Config, opts...) + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterSlice(des, initial []WorkflowTemplatePlacementManagedCluster, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedCluster { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedCluster, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedCluster(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedCluster, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedCluster(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedCluster(c *Client, des, nw *WorkflowTemplatePlacementManagedCluster) *WorkflowTemplatePlacementManagedCluster { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedCluster while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ClusterName, nw.ClusterName) { + nw.ClusterName = des.ClusterName + } + nw.Config = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c, des.Config, nw.Config) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterSet(c *Client, des, nw []WorkflowTemplatePlacementManagedCluster) []WorkflowTemplatePlacementManagedCluster { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedCluster + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedCluster(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedCluster) []WorkflowTemplatePlacementManagedCluster { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedCluster + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedCluster(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfig{} + + if dcl.IsZeroValue(des.StagingBucket) || (dcl.IsEmptyValueIndirect(des.StagingBucket) && dcl.IsEmptyValueIndirect(initial.StagingBucket)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.StagingBucket = initial.StagingBucket + } else { + cDes.StagingBucket = des.StagingBucket + } + if dcl.IsZeroValue(des.TempBucket) || (dcl.IsEmptyValueIndirect(des.TempBucket) && dcl.IsEmptyValueIndirect(initial.TempBucket)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TempBucket = initial.TempBucket + } else { + cDes.TempBucket = des.TempBucket + } + cDes.GceClusterConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(des.GceClusterConfig, initial.GceClusterConfig, opts...) + cDes.MasterConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(des.MasterConfig, initial.MasterConfig, opts...) + cDes.WorkerConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(des.WorkerConfig, initial.WorkerConfig, opts...) + cDes.SecondaryWorkerConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(des.SecondaryWorkerConfig, initial.SecondaryWorkerConfig, opts...) + cDes.SoftwareConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(des.SoftwareConfig, initial.SoftwareConfig, opts...) + cDes.InitializationActions = canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(des.InitializationActions, initial.InitializationActions, opts...) + cDes.EncryptionConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(des.EncryptionConfig, initial.EncryptionConfig, opts...) + cDes.AutoscalingConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(des.AutoscalingConfig, initial.AutoscalingConfig, opts...) + cDes.SecurityConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(des.SecurityConfig, initial.SecurityConfig, opts...) + cDes.LifecycleConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(des.LifecycleConfig, initial.LifecycleConfig, opts...) 
+ cDes.EndpointConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(des.EndpointConfig, initial.EndpointConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.GkeClusterConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(des.GkeClusterConfig, initial.GkeClusterConfig, opts...) + cDes.MetastoreConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(des.MetastoreConfig, initial.MetastoreConfig, opts...) +{{- end }} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfig) *WorkflowTemplatePlacementManagedClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.GceClusterConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, des.GceClusterConfig, nw.GceClusterConfig) + nw.MasterConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, des.MasterConfig, nw.MasterConfig) + nw.WorkerConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, des.WorkerConfig, nw.WorkerConfig) + nw.SecondaryWorkerConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, des.SecondaryWorkerConfig, nw.SecondaryWorkerConfig) + nw.SoftwareConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, des.SoftwareConfig, nw.SoftwareConfig) + nw.InitializationActions = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c, des.InitializationActions, nw.InitializationActions) + nw.EncryptionConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, des.EncryptionConfig, nw.EncryptionConfig) + nw.AutoscalingConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, des.AutoscalingConfig, nw.AutoscalingConfig) + nw.SecurityConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, des.SecurityConfig, nw.SecurityConfig) + nw.LifecycleConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, des.LifecycleConfig, nw.LifecycleConfig) + nw.EndpointConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, des.EndpointConfig, nw.EndpointConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.GkeClusterConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, des.GkeClusterConfig, nw.GkeClusterConfig) + nw.MetastoreConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, des.MetastoreConfig, nw.MetastoreConfig) 
+{{- end }} + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfig) []WorkflowTemplatePlacementManagedClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfig) []WorkflowTemplatePlacementManagedClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + + if dcl.StringCanonicalize(des.Zone, initial.Zone) || dcl.IsZeroValue(des.Zone) { + cDes.Zone = initial.Zone + } else { + cDes.Zone = des.Zone + } + if dcl.IsZeroValue(des.Network) || (dcl.IsEmptyValueIndirect(des.Network) && dcl.IsEmptyValueIndirect(initial.Network)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Network = initial.Network + } else { + cDes.Network = des.Network + } + if dcl.IsZeroValue(des.Subnetwork) || (dcl.IsEmptyValueIndirect(des.Subnetwork) && dcl.IsEmptyValueIndirect(initial.Subnetwork)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Subnetwork = initial.Subnetwork + } else { + cDes.Subnetwork = des.Subnetwork + } + if dcl.BoolCanonicalize(des.InternalIPOnly, initial.InternalIPOnly) || dcl.IsZeroValue(des.InternalIPOnly) { + cDes.InternalIPOnly = initial.InternalIPOnly + } else { + cDes.InternalIPOnly = des.InternalIPOnly + } + if dcl.IsZeroValue(des.PrivateIPv6GoogleAccess) || (dcl.IsEmptyValueIndirect(des.PrivateIPv6GoogleAccess) && dcl.IsEmptyValueIndirect(initial.PrivateIPv6GoogleAccess)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.PrivateIPv6GoogleAccess = initial.PrivateIPv6GoogleAccess + } else { + cDes.PrivateIPv6GoogleAccess = des.PrivateIPv6GoogleAccess + } + if dcl.IsZeroValue(des.ServiceAccount) || (dcl.IsEmptyValueIndirect(des.ServiceAccount) && dcl.IsEmptyValueIndirect(initial.ServiceAccount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ServiceAccount = initial.ServiceAccount + } else { + cDes.ServiceAccount = des.ServiceAccount + } + if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, initial.ServiceAccountScopes) { + cDes.ServiceAccountScopes = initial.ServiceAccountScopes + } else { + cDes.ServiceAccountScopes = des.ServiceAccountScopes + } + if dcl.StringArrayCanonicalize(des.Tags, initial.Tags) { + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + if dcl.IsZeroValue(des.Metadata) || (dcl.IsEmptyValueIndirect(des.Metadata) && dcl.IsEmptyValueIndirect(initial.Metadata)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Metadata = initial.Metadata + } else { + cDes.Metadata = des.Metadata + } + cDes.ReservationAffinity = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(des.ReservationAffinity, initial.ReservationAffinity, opts...) + cDes.NodeGroupAffinity = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(des.NodeGroupAffinity, initial.NodeGroupAffinity, opts...) + cDes.ShieldedInstanceConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(des.ShieldedInstanceConfig, initial.ShieldedInstanceConfig, opts...) 
+ + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Zone, nw.Zone) { + nw.Zone = des.Zone + } + if dcl.BoolCanonicalize(des.InternalIPOnly, nw.InternalIPOnly) { + nw.InternalIPOnly = des.InternalIPOnly + } + if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, nw.ServiceAccountScopes) { + nw.ServiceAccountScopes = des.ServiceAccountScopes + } + if dcl.StringArrayCanonicalize(des.Tags, nw.Tags) { + nw.Tags = des.Tags + } + nw.ReservationAffinity = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, des.ReservationAffinity, nw.ReservationAffinity) + nw.NodeGroupAffinity = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, des.NodeGroupAffinity, nw.NodeGroupAffinity) + nw.ShieldedInstanceConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, des.ShieldedInstanceConfig, nw.ShieldedInstanceConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + + if dcl.IsZeroValue(des.ConsumeReservationType) || (dcl.IsEmptyValueIndirect(des.ConsumeReservationType) && dcl.IsEmptyValueIndirect(initial.ConsumeReservationType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.ConsumeReservationType = initial.ConsumeReservationType + } else { + cDes.ConsumeReservationType = des.ConsumeReservationType + } + if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { + cDes.Key = initial.Key + } else { + cDes.Key = des.Key + } + if dcl.StringArrayCanonicalize(des.Values, initial.Values) { + cDes.Values = initial.Values + } else { + cDes.Values = des.Values + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Key, nw.Key) { + nw.Key = des.Key + } + if dcl.StringArrayCanonicalize(des.Values, nw.Values) { + nw.Values = des.Values + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. 
+ items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + + if dcl.IsZeroValue(des.NodeGroup) || (dcl.IsEmptyValueIndirect(des.NodeGroup) && dcl.IsEmptyValueIndirect(initial.NodeGroup)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NodeGroup = initial.NodeGroup + } else { + cDes.NodeGroup = des.NodeGroup + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + + if dcl.BoolCanonicalize(des.EnableSecureBoot, initial.EnableSecureBoot) || dcl.IsZeroValue(des.EnableSecureBoot) { + cDes.EnableSecureBoot = initial.EnableSecureBoot + } else { + cDes.EnableSecureBoot = des.EnableSecureBoot + } + if dcl.BoolCanonicalize(des.EnableVtpm, initial.EnableVtpm) || dcl.IsZeroValue(des.EnableVtpm) { + cDes.EnableVtpm = initial.EnableVtpm + } else { + cDes.EnableVtpm = des.EnableVtpm + } + if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, initial.EnableIntegrityMonitoring) || dcl.IsZeroValue(des.EnableIntegrityMonitoring) { + cDes.EnableIntegrityMonitoring = initial.EnableIntegrityMonitoring + } else { + cDes.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != 
len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.EnableSecureBoot, nw.EnableSecureBoot) { + nw.EnableSecureBoot = des.EnableSecureBoot + } + if dcl.BoolCanonicalize(des.EnableVtpm, nw.EnableVtpm) { + nw.EnableVtpm = des.EnableVtpm + } + if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, nw.EnableIntegrityMonitoring) { + nw.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) 
+ if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { + nw.InstanceTemplateName = des.InstanceTemplateName + } + if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { + nw.InstanceGroupManagerName = des.InstanceGroupManagerName + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { + nw.AcceleratorType = des.AcceleratorType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) + if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { + nw.InstanceTemplateName = des.InstanceTemplateName + } + if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { + nw.InstanceGroupManagerName = des.InstanceGroupManagerName + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{} + + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { + nw.AcceleratorType = des.AcceleratorType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) + if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { + nw.InstanceTemplateName = des.InstanceTemplateName + } + if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { + nw.InstanceGroupManagerName = des.InstanceGroupManagerName + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{} + + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { + nw.AcceleratorType = des.AcceleratorType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + + if dcl.StringCanonicalize(des.ImageVersion, initial.ImageVersion) || dcl.IsZeroValue(des.ImageVersion) { + cDes.ImageVersion = initial.ImageVersion + } else { + cDes.ImageVersion = des.ImageVersion + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.IsZeroValue(des.OptionalComponents) || (dcl.IsEmptyValueIndirect(des.OptionalComponents) && dcl.IsEmptyValueIndirect(initial.OptionalComponents)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.OptionalComponents = initial.OptionalComponents + } else { + cDes.OptionalComponents = des.OptionalComponents + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ImageVersion, nw.ImageVersion) { + nw.ImageVersion = des.ImageVersion + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActions(des, initial *WorkflowTemplatePlacementManagedClusterConfigInitializationActions, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{} + + if dcl.StringCanonicalize(des.ExecutableFile, initial.ExecutableFile) || dcl.IsZeroValue(des.ExecutableFile) { + cDes.ExecutableFile = initial.ExecutableFile + } else { + cDes.ExecutableFile = des.ExecutableFile + } + if dcl.StringCanonicalize(des.ExecutionTimeout, initial.ExecutionTimeout) || dcl.IsZeroValue(des.ExecutionTimeout) { + cDes.ExecutionTimeout = initial.ExecutionTimeout + } else { + cDes.ExecutionTimeout = des.ExecutionTimeout + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigInitializationActions, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActions(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigInitializationActions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ExecutableFile, nw.ExecutableFile) { + nw.ExecutableFile = des.ExecutableFile + } + if dcl.StringCanonicalize(des.ExecutionTimeout, nw.ExecutionTimeout) { + nw.ExecutionTimeout = des.ExecutionTimeout + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigInitializationActions) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigInitializationActions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigInitializationActionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigInitializationActions) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigInitializationActions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + + if dcl.IsZeroValue(des.GcePdKmsKeyName) || (dcl.IsEmptyValueIndirect(des.GcePdKmsKeyName) && dcl.IsEmptyValueIndirect(initial.GcePdKmsKeyName)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.GcePdKmsKeyName = initial.GcePdKmsKeyName + } else { + cDes.GcePdKmsKeyName = des.GcePdKmsKeyName + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + + if dcl.IsZeroValue(des.Policy) || (dcl.IsEmptyValueIndirect(des.Policy) && dcl.IsEmptyValueIndirect(initial.Policy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Policy = initial.Policy + } else { + cDes.Policy = des.Policy + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + + cDes.KerberosConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(des.KerberosConfig, initial.KerberosConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecurityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.KerberosConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, des.KerberosConfig, nw.KerberosConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + + if dcl.BoolCanonicalize(des.EnableKerberos, initial.EnableKerberos) || dcl.IsZeroValue(des.EnableKerberos) { + cDes.EnableKerberos = initial.EnableKerberos + } else { + cDes.EnableKerberos = des.EnableKerberos + } + if dcl.StringCanonicalize(des.RootPrincipalPassword, initial.RootPrincipalPassword) || dcl.IsZeroValue(des.RootPrincipalPassword) { + cDes.RootPrincipalPassword = initial.RootPrincipalPassword + } else { + cDes.RootPrincipalPassword = des.RootPrincipalPassword + } + if dcl.IsZeroValue(des.KmsKey) || (dcl.IsEmptyValueIndirect(des.KmsKey) && dcl.IsEmptyValueIndirect(initial.KmsKey)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.KmsKey = initial.KmsKey + } else { + cDes.KmsKey = des.KmsKey + } + if dcl.StringCanonicalize(des.Keystore, initial.Keystore) || dcl.IsZeroValue(des.Keystore) { + cDes.Keystore = initial.Keystore + } else { + cDes.Keystore = des.Keystore + } + if dcl.StringCanonicalize(des.Truststore, initial.Truststore) || dcl.IsZeroValue(des.Truststore) { + cDes.Truststore = initial.Truststore + } else { + cDes.Truststore = des.Truststore + } + if dcl.StringCanonicalize(des.KeystorePassword, initial.KeystorePassword) || dcl.IsZeroValue(des.KeystorePassword) { + cDes.KeystorePassword = initial.KeystorePassword + } else { + cDes.KeystorePassword = des.KeystorePassword + } + if dcl.StringCanonicalize(des.KeyPassword, initial.KeyPassword) || dcl.IsZeroValue(des.KeyPassword) { + cDes.KeyPassword = initial.KeyPassword + } else { + cDes.KeyPassword = des.KeyPassword + } + if dcl.StringCanonicalize(des.TruststorePassword, initial.TruststorePassword) || dcl.IsZeroValue(des.TruststorePassword) { + cDes.TruststorePassword = initial.TruststorePassword + } else { + cDes.TruststorePassword = des.TruststorePassword + } + if dcl.StringCanonicalize(des.CrossRealmTrustRealm, initial.CrossRealmTrustRealm) || dcl.IsZeroValue(des.CrossRealmTrustRealm) { + cDes.CrossRealmTrustRealm = initial.CrossRealmTrustRealm + } else { + cDes.CrossRealmTrustRealm = des.CrossRealmTrustRealm + } + if dcl.StringCanonicalize(des.CrossRealmTrustKdc, initial.CrossRealmTrustKdc) || dcl.IsZeroValue(des.CrossRealmTrustKdc) { + cDes.CrossRealmTrustKdc = initial.CrossRealmTrustKdc + } else { + cDes.CrossRealmTrustKdc = des.CrossRealmTrustKdc + } + if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, initial.CrossRealmTrustAdminServer) || dcl.IsZeroValue(des.CrossRealmTrustAdminServer) { + cDes.CrossRealmTrustAdminServer = initial.CrossRealmTrustAdminServer + } else { + cDes.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer + } + if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, 
initial.CrossRealmTrustSharedPassword) || dcl.IsZeroValue(des.CrossRealmTrustSharedPassword) { + cDes.CrossRealmTrustSharedPassword = initial.CrossRealmTrustSharedPassword + } else { + cDes.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword + } + if dcl.StringCanonicalize(des.KdcDbKey, initial.KdcDbKey) || dcl.IsZeroValue(des.KdcDbKey) { + cDes.KdcDbKey = initial.KdcDbKey + } else { + cDes.KdcDbKey = des.KdcDbKey + } + if dcl.IsZeroValue(des.TgtLifetimeHours) || (dcl.IsEmptyValueIndirect(des.TgtLifetimeHours) && dcl.IsEmptyValueIndirect(initial.TgtLifetimeHours)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TgtLifetimeHours = initial.TgtLifetimeHours + } else { + cDes.TgtLifetimeHours = des.TgtLifetimeHours + } + if dcl.StringCanonicalize(des.Realm, initial.Realm) || dcl.IsZeroValue(des.Realm) { + cDes.Realm = initial.Realm + } else { + cDes.Realm = des.Realm + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.EnableKerberos, nw.EnableKerberos) { + nw.EnableKerberos = des.EnableKerberos + } + if dcl.StringCanonicalize(des.RootPrincipalPassword, nw.RootPrincipalPassword) { + nw.RootPrincipalPassword = des.RootPrincipalPassword + } + if dcl.StringCanonicalize(des.Keystore, nw.Keystore) { + nw.Keystore = des.Keystore + } + if dcl.StringCanonicalize(des.Truststore, nw.Truststore) { + nw.Truststore = des.Truststore + } + if dcl.StringCanonicalize(des.KeystorePassword, nw.KeystorePassword) { + nw.KeystorePassword = des.KeystorePassword + } + if dcl.StringCanonicalize(des.KeyPassword, nw.KeyPassword) { + nw.KeyPassword = des.KeyPassword + } + if dcl.StringCanonicalize(des.TruststorePassword, nw.TruststorePassword) { + nw.TruststorePassword = des.TruststorePassword + } + if dcl.StringCanonicalize(des.CrossRealmTrustRealm, nw.CrossRealmTrustRealm) { + nw.CrossRealmTrustRealm = des.CrossRealmTrustRealm + } + if dcl.StringCanonicalize(des.CrossRealmTrustKdc, nw.CrossRealmTrustKdc) { + nw.CrossRealmTrustKdc = des.CrossRealmTrustKdc + } + if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, nw.CrossRealmTrustAdminServer) { + nw.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer + } + if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, nw.CrossRealmTrustSharedPassword) 
{ + nw.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword + } + if dcl.StringCanonicalize(des.KdcDbKey, nw.KdcDbKey) { + nw.KdcDbKey = des.KdcDbKey + } + if dcl.StringCanonicalize(des.Realm, nw.Realm) { + nw.Realm = des.Realm + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + + if dcl.StringCanonicalize(des.IdleDeleteTtl, initial.IdleDeleteTtl) || dcl.IsZeroValue(des.IdleDeleteTtl) { + cDes.IdleDeleteTtl = initial.IdleDeleteTtl + } else { + cDes.IdleDeleteTtl = des.IdleDeleteTtl + } + if dcl.IsZeroValue(des.AutoDeleteTime) || (dcl.IsEmptyValueIndirect(des.AutoDeleteTime) && dcl.IsEmptyValueIndirect(initial.AutoDeleteTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AutoDeleteTime = initial.AutoDeleteTime + } else { + cDes.AutoDeleteTime = des.AutoDeleteTime + } + if dcl.StringCanonicalize(des.AutoDeleteTtl, initial.AutoDeleteTtl) || dcl.IsZeroValue(des.AutoDeleteTtl) { + cDes.AutoDeleteTtl = initial.AutoDeleteTtl + } else { + cDes.AutoDeleteTtl = des.AutoDeleteTtl + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.IdleDeleteTtl, nw.IdleDeleteTtl) { + nw.IdleDeleteTtl = des.IdleDeleteTtl + } + if dcl.StringCanonicalize(des.AutoDeleteTtl, nw.AutoDeleteTtl) { + nw.AutoDeleteTtl = des.AutoDeleteTtl + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + + if dcl.BoolCanonicalize(des.EnableHttpPortAccess, initial.EnableHttpPortAccess) || dcl.IsZeroValue(des.EnableHttpPortAccess) { + cDes.EnableHttpPortAccess = initial.EnableHttpPortAccess + } else { + cDes.EnableHttpPortAccess = des.EnableHttpPortAccess + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigEndpointConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.EnableHttpPortAccess, nw.EnableHttpPortAccess) { + nw.EnableHttpPortAccess = des.EnableHttpPortAccess + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigEndpointConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &d, &n)) +{{- if ne $.TargetVersionName "ga" }} + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + + cDes.NamespacedGkeDeploymentTarget = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des.NamespacedGkeDeploymentTarget, initial.NamespacedGkeDeploymentTarget, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.NamespacedGkeDeploymentTarget = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, des.NamespacedGkeDeploymentTarget, nw.NamespacedGkeDeploymentTarget) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des, initial *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + + if dcl.IsZeroValue(des.TargetGkeCluster) || (dcl.IsEmptyValueIndirect(des.TargetGkeCluster) && dcl.IsEmptyValueIndirect(initial.TargetGkeCluster)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.TargetGkeCluster = initial.TargetGkeCluster + } else { + cDes.TargetGkeCluster = des.TargetGkeCluster + } + if dcl.StringCanonicalize(des.ClusterNamespace, initial.ClusterNamespace) || dcl.IsZeroValue(des.ClusterNamespace) { + cDes.ClusterNamespace = initial.ClusterNamespace + } else { + cDes.ClusterNamespace = des.ClusterNamespace + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ClusterNamespace, nw.ClusterNamespace) { + nw.ClusterNamespace = des.ClusterNamespace + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + + if dcl.IsZeroValue(des.DataprocMetastoreService) || (dcl.IsEmptyValueIndirect(des.DataprocMetastoreService) && dcl.IsEmptyValueIndirect(initial.DataprocMetastoreService)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.DataprocMetastoreService = initial.DataprocMetastoreService + } else { + cDes.DataprocMetastoreService = des.DataprocMetastoreService + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &d, &n)) +{{- end }} + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementClusterSelector(des, initial *WorkflowTemplatePlacementClusterSelector, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementClusterSelector { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementClusterSelector{} + + if dcl.StringCanonicalize(des.Zone, initial.Zone) || dcl.IsZeroValue(des.Zone) { + cDes.Zone = initial.Zone + } else { + cDes.Zone = des.Zone + } + if dcl.IsZeroValue(des.ClusterLabels) || (dcl.IsEmptyValueIndirect(des.ClusterLabels) && dcl.IsEmptyValueIndirect(initial.ClusterLabels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ClusterLabels = initial.ClusterLabels + } else { + cDes.ClusterLabels = des.ClusterLabels + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementClusterSelectorSlice(des, initial []WorkflowTemplatePlacementClusterSelector, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementClusterSelector { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementClusterSelector, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementClusterSelector(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementClusterSelector, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementClusterSelector(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementClusterSelector(c *Client, des, nw *WorkflowTemplatePlacementClusterSelector) *WorkflowTemplatePlacementClusterSelector { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementClusterSelector while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Zone, nw.Zone) { + nw.Zone = des.Zone + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementClusterSelectorSet(c *Client, des, nw []WorkflowTemplatePlacementClusterSelector) []WorkflowTemplatePlacementClusterSelector { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementClusterSelector + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementClusterSelectorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementClusterSelector(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementClusterSelectorSlice(c *Client, des, nw []WorkflowTemplatePlacementClusterSelector) []WorkflowTemplatePlacementClusterSelector { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementClusterSelector + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementClusterSelector(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobs(des, initial *WorkflowTemplateJobs, opts ...dcl.ApplyOption) *WorkflowTemplateJobs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobs{} + + if dcl.StringCanonicalize(des.StepId, initial.StepId) || dcl.IsZeroValue(des.StepId) { + cDes.StepId = initial.StepId + } else { + cDes.StepId = des.StepId + } + cDes.HadoopJob = canonicalizeWorkflowTemplateJobsHadoopJob(des.HadoopJob, initial.HadoopJob, opts...) + cDes.SparkJob = canonicalizeWorkflowTemplateJobsSparkJob(des.SparkJob, initial.SparkJob, opts...) + cDes.PysparkJob = canonicalizeWorkflowTemplateJobsPysparkJob(des.PysparkJob, initial.PysparkJob, opts...) + cDes.HiveJob = canonicalizeWorkflowTemplateJobsHiveJob(des.HiveJob, initial.HiveJob, opts...) + cDes.PigJob = canonicalizeWorkflowTemplateJobsPigJob(des.PigJob, initial.PigJob, opts...) + cDes.SparkRJob = canonicalizeWorkflowTemplateJobsSparkRJob(des.SparkRJob, initial.SparkRJob, opts...) + cDes.SparkSqlJob = canonicalizeWorkflowTemplateJobsSparkSqlJob(des.SparkSqlJob, initial.SparkSqlJob, opts...) + cDes.PrestoJob = canonicalizeWorkflowTemplateJobsPrestoJob(des.PrestoJob, initial.PrestoJob, opts...) + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + cDes.Scheduling = canonicalizeWorkflowTemplateJobsScheduling(des.Scheduling, initial.Scheduling, opts...) 
+ if dcl.StringArrayCanonicalize(des.PrerequisiteStepIds, initial.PrerequisiteStepIds) { + cDes.PrerequisiteStepIds = initial.PrerequisiteStepIds + } else { + cDes.PrerequisiteStepIds = des.PrerequisiteStepIds + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSlice(des, initial []WorkflowTemplateJobs, opts ...dcl.ApplyOption) []WorkflowTemplateJobs { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobs, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobs, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobs(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobs(c *Client, des, nw *WorkflowTemplateJobs) *WorkflowTemplateJobs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobs while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.StepId, nw.StepId) { + nw.StepId = des.StepId + } + nw.HadoopJob = canonicalizeNewWorkflowTemplateJobsHadoopJob(c, des.HadoopJob, nw.HadoopJob) + nw.SparkJob = canonicalizeNewWorkflowTemplateJobsSparkJob(c, des.SparkJob, nw.SparkJob) + nw.PysparkJob = canonicalizeNewWorkflowTemplateJobsPysparkJob(c, des.PysparkJob, nw.PysparkJob) + nw.HiveJob = canonicalizeNewWorkflowTemplateJobsHiveJob(c, des.HiveJob, nw.HiveJob) + nw.PigJob = canonicalizeNewWorkflowTemplateJobsPigJob(c, des.PigJob, nw.PigJob) + nw.SparkRJob = canonicalizeNewWorkflowTemplateJobsSparkRJob(c, des.SparkRJob, nw.SparkRJob) + nw.SparkSqlJob = canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c, des.SparkSqlJob, nw.SparkSqlJob) + nw.PrestoJob = canonicalizeNewWorkflowTemplateJobsPrestoJob(c, des.PrestoJob, nw.PrestoJob) + nw.Scheduling = canonicalizeNewWorkflowTemplateJobsScheduling(c, des.Scheduling, nw.Scheduling) + if dcl.StringArrayCanonicalize(des.PrerequisiteStepIds, nw.PrerequisiteStepIds) { + nw.PrerequisiteStepIds = des.PrerequisiteStepIds + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSet(c *Client, des, nw []WorkflowTemplateJobs) []WorkflowTemplateJobs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplateJobsSlice(c *Client, des, nw []WorkflowTemplateJobs) []WorkflowTemplateJobs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobs(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHadoopJob(des, initial *WorkflowTemplateJobsHadoopJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHadoopJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHadoopJob{} + + if dcl.StringCanonicalize(des.MainJarFileUri, initial.MainJarFileUri) || dcl.IsZeroValue(des.MainJarFileUri) { + cDes.MainJarFileUri = initial.MainJarFileUri + } else { + cDes.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, initial.MainClass) || dcl.IsZeroValue(des.MainClass) { + cDes.MainClass = initial.MainClass + } else { + cDes.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are 
equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHadoopJobSlice(des, initial []WorkflowTemplateJobsHadoopJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHadoopJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHadoopJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHadoopJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJob(c *Client, des, nw *WorkflowTemplateJobsHadoopJob) *WorkflowTemplateJobsHadoopJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHadoopJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainJarFileUri, nw.MainJarFileUri) { + nw.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, nw.MainClass) { + nw.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobSet(c *Client, des, nw []WorkflowTemplateJobsHadoopJob) []WorkflowTemplateJobsHadoopJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHadoopJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHadoopJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobSlice(c *Client, des, nw []WorkflowTemplateJobsHadoopJob) []WorkflowTemplateJobsHadoopJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHadoopJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(des, initial *WorkflowTemplateJobsHadoopJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHadoopJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHadoopJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsHadoopJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHadoopJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHadoopJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHadoopJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsHadoopJobLoggingConfig) *WorkflowTemplateJobsHadoopJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHadoopJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsHadoopJobLoggingConfig) []WorkflowTemplateJobsHadoopJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHadoopJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHadoopJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsHadoopJobLoggingConfig) []WorkflowTemplateJobsHadoopJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHadoopJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkJob(des, initial *WorkflowTemplateJobsSparkJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkJob{} + + if dcl.StringCanonicalize(des.MainJarFileUri, initial.MainJarFileUri) || dcl.IsZeroValue(des.MainJarFileUri) { + cDes.MainJarFileUri = initial.MainJarFileUri + } else { + cDes.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, initial.MainClass) || dcl.IsZeroValue(des.MainClass) { + cDes.MainClass = initial.MainClass + } else { + cDes.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkJobSlice(des, initial []WorkflowTemplateJobsSparkJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkJob(c *Client, des, nw *WorkflowTemplateJobsSparkJob) *WorkflowTemplateJobsSparkJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainJarFileUri, nw.MainJarFileUri) { + nw.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, nw.MainClass) { + nw.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobSet(c *Client, des, nw []WorkflowTemplateJobsSparkJob) []WorkflowTemplateJobsSparkJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobSlice(c *Client, des, nw []WorkflowTemplateJobsSparkJob) []WorkflowTemplateJobsSparkJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(des, initial *WorkflowTemplateJobsSparkJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsSparkJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsSparkJobLoggingConfig) *WorkflowTemplateJobsSparkJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsSparkJobLoggingConfig) []WorkflowTemplateJobsSparkJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsSparkJobLoggingConfig) []WorkflowTemplateJobsSparkJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPysparkJob(des, initial *WorkflowTemplateJobsPysparkJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPysparkJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPysparkJob{} + + if dcl.StringCanonicalize(des.MainPythonFileUri, initial.MainPythonFileUri) || dcl.IsZeroValue(des.MainPythonFileUri) { + cDes.MainPythonFileUri = initial.MainPythonFileUri + } else { + cDes.MainPythonFileUri = des.MainPythonFileUri + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.PythonFileUris, initial.PythonFileUris) { + cDes.PythonFileUris = initial.PythonFileUris + } else { + cDes.PythonFileUris = des.PythonFileUris + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPysparkJobSlice(des, initial []WorkflowTemplateJobsPysparkJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPysparkJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPysparkJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPysparkJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJob(c *Client, des, nw *WorkflowTemplateJobsPysparkJob) *WorkflowTemplateJobsPysparkJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPysparkJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainPythonFileUri, nw.MainPythonFileUri) { + nw.MainPythonFileUri = des.MainPythonFileUri + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.PythonFileUris, nw.PythonFileUris) { + nw.PythonFileUris = des.PythonFileUris + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobSet(c *Client, des, nw []WorkflowTemplateJobsPysparkJob) []WorkflowTemplateJobsPysparkJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPysparkJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPysparkJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobSlice(c *Client, des, nw []WorkflowTemplateJobsPysparkJob) []WorkflowTemplateJobsPysparkJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPysparkJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(des, initial *WorkflowTemplateJobsPysparkJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPysparkJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPysparkJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsPysparkJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPysparkJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPysparkJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPysparkJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsPysparkJobLoggingConfig) *WorkflowTemplateJobsPysparkJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPysparkJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsPysparkJobLoggingConfig) []WorkflowTemplateJobsPysparkJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPysparkJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPysparkJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsPysparkJobLoggingConfig) []WorkflowTemplateJobsPysparkJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPysparkJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHiveJob(des, initial *WorkflowTemplateJobsHiveJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHiveJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHiveJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsHiveJobQueryList(des.QueryList, initial.QueryList, opts...) + if dcl.BoolCanonicalize(des.ContinueOnFailure, initial.ContinueOnFailure) || dcl.IsZeroValue(des.ContinueOnFailure) { + cDes.ContinueOnFailure = initial.ContinueOnFailure + } else { + cDes.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.IsZeroValue(des.ScriptVariables) || (dcl.IsEmptyValueIndirect(des.ScriptVariables) && dcl.IsEmptyValueIndirect(initial.ScriptVariables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScriptVariables = initial.ScriptVariables + } else { + cDes.ScriptVariables = des.ScriptVariables + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHiveJobSlice(des, initial []WorkflowTemplateJobsHiveJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHiveJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHiveJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHiveJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHiveJob(c *Client, des, nw *WorkflowTemplateJobsHiveJob) *WorkflowTemplateJobsHiveJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHiveJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.BoolCanonicalize(des.ContinueOnFailure, nw.ContinueOnFailure) { + nw.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobSet(c *Client, des, nw []WorkflowTemplateJobsHiveJob) []WorkflowTemplateJobsHiveJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHiveJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHiveJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobSlice(c *Client, des, nw []WorkflowTemplateJobsHiveJob) []WorkflowTemplateJobsHiveJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHiveJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHiveJobQueryList(des, initial *WorkflowTemplateJobsHiveJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHiveJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHiveJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHiveJobQueryListSlice(des, initial []WorkflowTemplateJobsHiveJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHiveJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHiveJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHiveJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJobQueryList(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c *Client, des, nw *WorkflowTemplateJobsHiveJobQueryList) *WorkflowTemplateJobsHiveJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHiveJobQueryList while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsHiveJobQueryList) []WorkflowTemplateJobsHiveJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHiveJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHiveJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsHiveJobQueryList) []WorkflowTemplateJobsHiveJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHiveJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPigJob(des, initial *WorkflowTemplateJobsPigJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPigJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPigJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsPigJobQueryList(des.QueryList, initial.QueryList, opts...) + if dcl.BoolCanonicalize(des.ContinueOnFailure, initial.ContinueOnFailure) || dcl.IsZeroValue(des.ContinueOnFailure) { + cDes.ContinueOnFailure = initial.ContinueOnFailure + } else { + cDes.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.IsZeroValue(des.ScriptVariables) || (dcl.IsEmptyValueIndirect(des.ScriptVariables) && dcl.IsEmptyValueIndirect(initial.ScriptVariables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScriptVariables = initial.ScriptVariables + } else { + cDes.ScriptVariables = des.ScriptVariables + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPigJobSlice(des, initial []WorkflowTemplateJobsPigJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPigJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPigJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPigJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPigJob(c *Client, des, nw *WorkflowTemplateJobsPigJob) *WorkflowTemplateJobsPigJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPigJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.BoolCanonicalize(des.ContinueOnFailure, nw.ContinueOnFailure) { + nw.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPigJobSet(c *Client, des, nw []WorkflowTemplateJobsPigJob) []WorkflowTemplateJobsPigJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPigJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPigJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPigJobSlice(c *Client, des, nw []WorkflowTemplateJobsPigJob) []WorkflowTemplateJobsPigJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPigJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPigJobQueryList(des, initial *WorkflowTemplateJobsPigJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPigJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPigJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPigJobQueryListSlice(des, initial []WorkflowTemplateJobsPigJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPigJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPigJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPigJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobQueryList(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c *Client, des, nw *WorkflowTemplateJobsPigJobQueryList) *WorkflowTemplateJobsPigJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPigJobQueryList while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPigJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsPigJobQueryList) []WorkflowTemplateJobsPigJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPigJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPigJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPigJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsPigJobQueryList) []WorkflowTemplateJobsPigJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPigJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(des, initial *WorkflowTemplateJobsPigJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPigJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPigJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPigJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsPigJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPigJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPigJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPigJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsPigJobLoggingConfig) *WorkflowTemplateJobsPigJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPigJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsPigJobLoggingConfig) []WorkflowTemplateJobsPigJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPigJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPigJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsPigJobLoggingConfig) []WorkflowTemplateJobsPigJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPigJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkRJob(des, initial *WorkflowTemplateJobsSparkRJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkRJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkRJob{} + + if dcl.StringCanonicalize(des.MainRFileUri, initial.MainRFileUri) || dcl.IsZeroValue(des.MainRFileUri) { + cDes.MainRFileUri = initial.MainRFileUri + } else { + cDes.MainRFileUri = des.MainRFileUri + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) 
+ + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkRJobSlice(des, initial []WorkflowTemplateJobsSparkRJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkRJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkRJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkRJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJob(c *Client, des, nw *WorkflowTemplateJobsSparkRJob) *WorkflowTemplateJobsSparkRJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkRJob while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainRFileUri, nw.MainRFileUri) { + nw.MainRFileUri = des.MainRFileUri + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobSet(c *Client, des, nw []WorkflowTemplateJobsSparkRJob) []WorkflowTemplateJobsSparkRJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplateJobsSparkRJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkRJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobSlice(c *Client, des, nw []WorkflowTemplateJobsSparkRJob) []WorkflowTemplateJobsSparkRJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkRJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(des, initial *WorkflowTemplateJobsSparkRJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkRJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkRJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsSparkRJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkRJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkRJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkRJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsSparkRJobLoggingConfig) *WorkflowTemplateJobsSparkRJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkRJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsSparkRJobLoggingConfig) []WorkflowTemplateJobsSparkRJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplateJobsSparkRJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkRJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsSparkRJobLoggingConfig) []WorkflowTemplateJobsSparkRJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkRJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJob(des, initial *WorkflowTemplateJobsSparkSqlJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkSqlJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkSqlJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(des.QueryList, initial.QueryList, opts...) 
+ if dcl.IsZeroValue(des.ScriptVariables) || (dcl.IsEmptyValueIndirect(des.ScriptVariables) && dcl.IsEmptyValueIndirect(initial.ScriptVariables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScriptVariables = initial.ScriptVariables + } else { + cDes.ScriptVariables = des.ScriptVariables + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobSlice(des, initial []WorkflowTemplateJobsSparkSqlJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkSqlJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkSqlJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkSqlJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJob(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c *Client, des, nw *WorkflowTemplateJobsSparkSqlJob) *WorkflowTemplateJobsSparkSqlJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkSqlJob while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobSet(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJob) []WorkflowTemplateJobsSparkSqlJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkSqlJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkSqlJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobSlice(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJob) []WorkflowTemplateJobsSparkSqlJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkSqlJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(des, initial *WorkflowTemplateJobsSparkSqlJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkSqlJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkSqlJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobQueryListSlice(des, initial []WorkflowTemplateJobsSparkSqlJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkSqlJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkSqlJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkSqlJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c *Client, des, nw *WorkflowTemplateJobsSparkSqlJobQueryList) *WorkflowTemplateJobsSparkSqlJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkSqlJobQueryList while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobQueryList) []WorkflowTemplateJobsSparkSqlJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkSqlJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkSqlJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobQueryList) []WorkflowTemplateJobsSparkSqlJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkSqlJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(des, initial *WorkflowTemplateJobsSparkSqlJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsSparkSqlJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkSqlJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkSqlJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsSparkSqlJobLoggingConfig) *WorkflowTemplateJobsSparkSqlJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkSqlJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobLoggingConfig) []WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkSqlJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkSqlJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobLoggingConfig) []WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkSqlJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPrestoJob(des, initial *WorkflowTemplateJobsPrestoJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPrestoJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPrestoJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsPrestoJobQueryList(des.QueryList, initial.QueryList, opts...) + if dcl.BoolCanonicalize(des.ContinueOnFailure, initial.ContinueOnFailure) || dcl.IsZeroValue(des.ContinueOnFailure) { + cDes.ContinueOnFailure = initial.ContinueOnFailure + } else { + cDes.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringCanonicalize(des.OutputFormat, initial.OutputFormat) || dcl.IsZeroValue(des.OutputFormat) { + cDes.OutputFormat = initial.OutputFormat + } else { + cDes.OutputFormat = des.OutputFormat + } + if dcl.StringArrayCanonicalize(des.ClientTags, initial.ClientTags) { + cDes.ClientTags = initial.ClientTags + } else { + cDes.ClientTags = des.ClientTags + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) 
+ + return cDes +} + +func canonicalizeWorkflowTemplateJobsPrestoJobSlice(des, initial []WorkflowTemplateJobsPrestoJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPrestoJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPrestoJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPrestoJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJob(c *Client, des, nw *WorkflowTemplateJobsPrestoJob) *WorkflowTemplateJobsPrestoJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPrestoJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.BoolCanonicalize(des.ContinueOnFailure, nw.ContinueOnFailure) { + nw.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringCanonicalize(des.OutputFormat, nw.OutputFormat) { + nw.OutputFormat = des.OutputFormat + } + if dcl.StringArrayCanonicalize(des.ClientTags, nw.ClientTags) { + nw.ClientTags = des.ClientTags + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobSet(c *Client, des, nw []WorkflowTemplateJobsPrestoJob) []WorkflowTemplateJobsPrestoJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPrestoJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPrestoJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobSlice(c *Client, des, nw []WorkflowTemplateJobsPrestoJob) []WorkflowTemplateJobsPrestoJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPrestoJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPrestoJobQueryList(des, initial *WorkflowTemplateJobsPrestoJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPrestoJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPrestoJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPrestoJobQueryListSlice(des, initial []WorkflowTemplateJobsPrestoJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPrestoJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPrestoJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPrestoJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobQueryList(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c *Client, des, nw *WorkflowTemplateJobsPrestoJobQueryList) *WorkflowTemplateJobsPrestoJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPrestoJobQueryList while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsPrestoJobQueryList) []WorkflowTemplateJobsPrestoJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPrestoJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPrestoJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsPrestoJobQueryList) []WorkflowTemplateJobsPrestoJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPrestoJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(des, initial *WorkflowTemplateJobsPrestoJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPrestoJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPrestoJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsPrestoJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPrestoJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPrestoJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPrestoJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsPrestoJobLoggingConfig) *WorkflowTemplateJobsPrestoJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPrestoJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsPrestoJobLoggingConfig) []WorkflowTemplateJobsPrestoJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPrestoJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPrestoJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsPrestoJobLoggingConfig) []WorkflowTemplateJobsPrestoJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPrestoJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsScheduling(des, initial *WorkflowTemplateJobsScheduling, opts ...dcl.ApplyOption) *WorkflowTemplateJobsScheduling { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsScheduling{} + + if dcl.IsZeroValue(des.MaxFailuresPerHour) || (dcl.IsEmptyValueIndirect(des.MaxFailuresPerHour) && dcl.IsEmptyValueIndirect(initial.MaxFailuresPerHour)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxFailuresPerHour = initial.MaxFailuresPerHour + } else { + cDes.MaxFailuresPerHour = des.MaxFailuresPerHour + } + if dcl.IsZeroValue(des.MaxFailuresTotal) || (dcl.IsEmptyValueIndirect(des.MaxFailuresTotal) && dcl.IsEmptyValueIndirect(initial.MaxFailuresTotal)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxFailuresTotal = initial.MaxFailuresTotal + } else { + cDes.MaxFailuresTotal = des.MaxFailuresTotal + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSchedulingSlice(des, initial []WorkflowTemplateJobsScheduling, opts ...dcl.ApplyOption) []WorkflowTemplateJobsScheduling { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsScheduling, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsScheduling(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsScheduling, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsScheduling(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsScheduling(c *Client, des, nw *WorkflowTemplateJobsScheduling) *WorkflowTemplateJobsScheduling { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsScheduling while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSchedulingSet(c *Client, des, nw []WorkflowTemplateJobsScheduling) []WorkflowTemplateJobsScheduling { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsScheduling + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSchedulingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsScheduling(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSchedulingSlice(c *Client, des, nw []WorkflowTemplateJobsScheduling) []WorkflowTemplateJobsScheduling { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsScheduling + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsScheduling(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParameters(des, initial *WorkflowTemplateParameters, opts ...dcl.ApplyOption) *WorkflowTemplateParameters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParameters{} + + if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { + cDes.Name = initial.Name + } else { + cDes.Name = des.Name + } + if dcl.StringArrayCanonicalize(des.Fields, initial.Fields) { + cDes.Fields = initial.Fields + } else { + cDes.Fields = des.Fields + } + if dcl.StringCanonicalize(des.Description, initial.Description) || dcl.IsZeroValue(des.Description) { + cDes.Description = initial.Description + } else { + cDes.Description = des.Description + } + cDes.Validation = canonicalizeWorkflowTemplateParametersValidation(des.Validation, initial.Validation, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateParametersSlice(des, initial []WorkflowTemplateParameters, opts ...dcl.ApplyOption) []WorkflowTemplateParameters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParameters, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParameters(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParameters, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParameters(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParameters(c *Client, des, nw *WorkflowTemplateParameters) *WorkflowTemplateParameters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParameters while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Name, nw.Name) { + nw.Name = des.Name + } + if dcl.StringArrayCanonicalize(des.Fields, nw.Fields) { + nw.Fields = des.Fields + } + if dcl.StringCanonicalize(des.Description, nw.Description) { + nw.Description = des.Description + } + nw.Validation = canonicalizeNewWorkflowTemplateParametersValidation(c, des.Validation, nw.Validation) + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersSet(c *Client, des, nw []WorkflowTemplateParameters) []WorkflowTemplateParameters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParameters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParameters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersSlice(c *Client, des, nw []WorkflowTemplateParameters) []WorkflowTemplateParameters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParameters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParameters(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParametersValidation(des, initial *WorkflowTemplateParametersValidation, opts ...dcl.ApplyOption) *WorkflowTemplateParametersValidation { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParametersValidation{} + + cDes.Regex = canonicalizeWorkflowTemplateParametersValidationRegex(des.Regex, initial.Regex, opts...) + cDes.Values = canonicalizeWorkflowTemplateParametersValidationValues(des.Values, initial.Values, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateParametersValidationSlice(des, initial []WorkflowTemplateParametersValidation, opts ...dcl.ApplyOption) []WorkflowTemplateParametersValidation { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParametersValidation, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidation(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParametersValidation, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidation(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParametersValidation(c *Client, des, nw *WorkflowTemplateParametersValidation) *WorkflowTemplateParametersValidation { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParametersValidation while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.Regex = canonicalizeNewWorkflowTemplateParametersValidationRegex(c, des.Regex, nw.Regex) + nw.Values = canonicalizeNewWorkflowTemplateParametersValidationValues(c, des.Values, nw.Values) + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersValidationSet(c *Client, des, nw []WorkflowTemplateParametersValidation) []WorkflowTemplateParametersValidation { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParametersValidation + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersValidationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidation(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersValidationSlice(c *Client, des, nw []WorkflowTemplateParametersValidation) []WorkflowTemplateParametersValidation { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParametersValidation + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidation(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParametersValidationRegex(des, initial *WorkflowTemplateParametersValidationRegex, opts ...dcl.ApplyOption) *WorkflowTemplateParametersValidationRegex { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParametersValidationRegex{} + + if dcl.StringArrayCanonicalize(des.Regexes, initial.Regexes) { + cDes.Regexes = initial.Regexes + } else { + cDes.Regexes = des.Regexes + } + + return cDes +} + +func canonicalizeWorkflowTemplateParametersValidationRegexSlice(des, initial []WorkflowTemplateParametersValidationRegex, opts ...dcl.ApplyOption) []WorkflowTemplateParametersValidationRegex { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParametersValidationRegex, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationRegex(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParametersValidationRegex, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationRegex(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParametersValidationRegex(c *Client, des, nw *WorkflowTemplateParametersValidationRegex) *WorkflowTemplateParametersValidationRegex { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParametersValidationRegex while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Regexes, nw.Regexes) { + nw.Regexes = des.Regexes + } + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersValidationRegexSet(c *Client, des, nw []WorkflowTemplateParametersValidationRegex) []WorkflowTemplateParametersValidationRegex { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParametersValidationRegex + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersValidationRegexNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationRegex(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersValidationRegexSlice(c *Client, des, nw []WorkflowTemplateParametersValidationRegex) []WorkflowTemplateParametersValidationRegex { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParametersValidationRegex + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationRegex(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParametersValidationValues(des, initial *WorkflowTemplateParametersValidationValues, opts ...dcl.ApplyOption) *WorkflowTemplateParametersValidationValues { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParametersValidationValues{} + + if dcl.StringArrayCanonicalize(des.Values, initial.Values) { + cDes.Values = initial.Values + } else { + cDes.Values = des.Values + } + + return cDes +} + +func canonicalizeWorkflowTemplateParametersValidationValuesSlice(des, initial []WorkflowTemplateParametersValidationValues, opts ...dcl.ApplyOption) []WorkflowTemplateParametersValidationValues { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParametersValidationValues, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationValues(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParametersValidationValues, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationValues(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParametersValidationValues(c *Client, des, nw *WorkflowTemplateParametersValidationValues) *WorkflowTemplateParametersValidationValues { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParametersValidationValues while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Values, nw.Values) { + nw.Values = des.Values + } + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersValidationValuesSet(c *Client, des, nw []WorkflowTemplateParametersValidationValues) []WorkflowTemplateParametersValidationValues { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParametersValidationValues + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersValidationValuesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationValues(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersValidationValuesSlice(c *Client, des, nw []WorkflowTemplateParametersValidationValues) []WorkflowTemplateParametersValidationValues { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParametersValidationValues + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationValues(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffWorkflowTemplate(c *Client, desired, actual *WorkflowTemplate, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.EncryptionConfig, actual.EncryptionConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateEncryptionConfigNewStyle, EmptyObject: EmptyWorkflowTemplateEncryptionConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EncryptionConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Placement, actual.Placement, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementNewStyle, EmptyObject: EmptyWorkflowTemplatePlacement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Placement")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Jobs, actual.Jobs, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsNewStyle, EmptyObject: EmptyWorkflowTemplateJobs, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Jobs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Parameters, actual.Parameters, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersNewStyle, EmptyObject: EmptyWorkflowTemplateParameters, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Parameters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DagTimeout, actual.DagTimeout, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DagTimeout")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareWorkflowTemplateEncryptionConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateEncryptionConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateEncryptionConfig or *WorkflowTemplateEncryptionConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateEncryptionConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateEncryptionConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KmsKey, actual.KmsKey, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacement) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacement or *WorkflowTemplatePlacement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacement) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ManagedCluster, actual.ManagedCluster, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedCluster, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ClusterSelector, actual.ClusterSelector, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementClusterSelectorNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementClusterSelector, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterSelector")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedCluster) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedCluster or *WorkflowTemplatePlacementManagedCluster", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedCluster) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedCluster", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ClusterName, actual.ClusterName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfig or *WorkflowTemplatePlacementManagedClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.StagingBucket, actual.StagingBucket, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigBucket")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TempBucket, actual.TempBucket, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TempBucket")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GceClusterConfig, actual.GceClusterConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GceClusterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MasterConfig, actual.MasterConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MasterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecondaryWorkerConfig, actual.SecondaryWorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecondaryWorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SoftwareConfig, actual.SoftwareConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SoftwareConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InitializationActions, actual.InitializationActions, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigInitializationActionsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InitializationActions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EncryptionConfig, actual.EncryptionConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EncryptionConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AutoscalingConfig, actual.AutoscalingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoscalingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecurityConfig, actual.SecurityConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecurityConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.LifecycleConfig, actual.LifecycleConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LifecycleConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EndpointConfig, actual.EndpointConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigEndpointConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EndpointConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } +{{- if ne $.TargetVersionName "ga" }} + + if ds, err := dcl.Diff(desired.GkeClusterConfig, actual.GkeClusterConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MetastoreConfig, actual.MetastoreConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } +{{- end }} + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ZoneUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Network, actual.Network, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Subnetwork, actual.Subnetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetworkUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InternalIPOnly, actual.InternalIPOnly, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InternalIpOnly")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrivateIPv6GoogleAccess, actual.PrivateIPv6GoogleAccess, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateIpv6GoogleAccess")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccountScopes, actual.ServiceAccountScopes, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccountScopes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{Type: "Set", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ReservationAffinity, actual.ReservationAffinity, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReservationAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NodeGroupAffinity, actual.NodeGroupAffinity, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ShieldedInstanceConfig, actual.ShieldedInstanceConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ShieldedInstanceConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ConsumeReservationType, actual.ConsumeReservationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConsumeReservationType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NodeGroup, actual.NodeGroup, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableSecureBoot, actual.EnableSecureBoot, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableSecureBoot")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableVtpm, actual.EnableVtpm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableVtpm")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableIntegrityMonitoring, actual.EnableIntegrityMonitoring, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableIntegrityMonitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfig or *WorkflowTemplatePlacementManagedClusterConfigMasterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig or *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + 
diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig or *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators or *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfig or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + 
diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, 
fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, 
fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig or *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ImageVersion, actual.ImageVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.OptionalComponents, actual.OptionalComponents, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("OptionalComponents")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigInitializationActionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigInitializationActions or *WorkflowTemplatePlacementManagedClusterConfigInitializationActions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigInitializationActions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ExecutableFile, actual.ExecutableFile, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutableFile")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExecutionTimeout, actual.ExecutionTimeout, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutionTimeout")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig or *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GcePdKmsKeyName, actual.GcePdKmsKeyName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GcePdKmsKeyName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig or *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Policy, actual.Policy, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfig or *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KerberosConfig, actual.KerberosConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KerberosConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig or *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableKerberos, actual.EnableKerberos, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableKerberos")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RootPrincipalPassword, actual.RootPrincipalPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootPrincipalPasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KmsKey, actual.KmsKey, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Keystore, actual.Keystore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystoreUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Truststore, actual.Truststore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststoreUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KeystorePassword, actual.KeystorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystorePasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KeyPassword, actual.KeyPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyPasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TruststorePassword, actual.TruststorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststorePasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CrossRealmTrustRealm, actual.CrossRealmTrustRealm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustRealm")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CrossRealmTrustKdc, actual.CrossRealmTrustKdc, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustKdc")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CrossRealmTrustAdminServer, actual.CrossRealmTrustAdminServer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustAdminServer")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CrossRealmTrustSharedPassword, actual.CrossRealmTrustSharedPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustSharedPasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KdcDbKey, actual.KdcDbKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KdcDbKeyUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TgtLifetimeHours, actual.TgtLifetimeHours, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TgtLifetimeHours")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Realm, actual.Realm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Realm")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig or *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.IdleDeleteTtl, actual.IdleDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleDeleteTtl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AutoDeleteTime, actual.AutoDeleteTime, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AutoDeleteTtl, actual.AutoDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTtl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.IdleStartTime, actual.IdleStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleStartTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigEndpointConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEndpointConfig or *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEndpointConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HttpPorts, actual.HttpPorts, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HttpPorts")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableHttpPortAccess, actual.EnableHttpPortAccess, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableHttpPortAccess")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig or *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NamespacedGkeDeploymentTarget, actual.NamespacedGkeDeploymentTarget, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NamespacedGkeDeploymentTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget or *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetGkeCluster, actual.TargetGkeCluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TargetGkeCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ClusterNamespace, actual.ClusterNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterNamespace")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig or *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DataprocMetastoreService, actual.DataprocMetastoreService, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetastoreService")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementClusterSelectorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementClusterSelector) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementClusterSelector) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementClusterSelector or *WorkflowTemplatePlacementClusterSelector", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementClusterSelector) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementClusterSelector) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementClusterSelector", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Zone")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ClusterLabels, actual.ClusterLabels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterLabels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobs) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobs) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobs or *WorkflowTemplateJobs", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobs) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobs) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobs", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.StepId, actual.StepId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StepId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.HadoopJob, actual.HadoopJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHadoopJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHadoopJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HadoopJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SparkJob, actual.SparkJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PysparkJob, actual.PysparkJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPysparkJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPysparkJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PysparkJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.HiveJob, actual.HiveJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHiveJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHiveJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HiveJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PigJob, actual.PigJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPigJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPigJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PigJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SparkRJob, actual.SparkRJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkRJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkRJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkRJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SparkSqlJob, actual.SparkSqlJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkSqlJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkSqlJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkSqlJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrestoJob, actual.PrestoJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPrestoJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPrestoJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrestoJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Scheduling, actual.Scheduling, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSchedulingNewStyle, EmptyObject: EmptyWorkflowTemplateJobsScheduling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Scheduling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrerequisiteStepIds, actual.PrerequisiteStepIds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrerequisiteStepIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsHadoopJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsHadoopJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsHadoopJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJob or *WorkflowTemplateJobsHadoopJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsHadoopJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsHadoopJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MainJarFileUri, actual.MainJarFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainJarFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MainClass, actual.MainClass, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainClass")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHadoopJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHadoopJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsHadoopJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJobLoggingConfig or *WorkflowTemplateJobsHadoopJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJob or *WorkflowTemplateJobsSparkJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MainJarFileUri, actual.MainJarFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainJarFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MainClass, actual.MainClass, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainClass")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJobLoggingConfig or *WorkflowTemplateJobsSparkJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsPysparkJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPysparkJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPysparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJob or *WorkflowTemplateJobsPysparkJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPysparkJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPysparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MainPythonFileUri, actual.MainPythonFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainPythonFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PythonFileUris, actual.PythonFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PythonFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPysparkJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPysparkJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsPysparkJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPysparkJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPysparkJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJobLoggingConfig or *WorkflowTemplateJobsPysparkJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPysparkJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPysparkJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsHiveJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsHiveJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsHiveJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJob or *WorkflowTemplateJobsHiveJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsHiveJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsHiveJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHiveJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHiveJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ContinueOnFailure, actual.ContinueOnFailure, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContinueOnFailure")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ScriptVariables, actual.ScriptVariables, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ScriptVariables")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsHiveJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsHiveJobQueryList) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsHiveJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJobQueryList or *WorkflowTemplateJobsHiveJobQueryList", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsHiveJobQueryList) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsHiveJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJobQueryList", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsPigJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPigJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPigJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJob or *WorkflowTemplateJobsPigJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPigJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPigJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPigJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPigJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ContinueOnFailure, actual.ContinueOnFailure, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContinueOnFailure")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ScriptVariables, actual.ScriptVariables, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ScriptVariables")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPigJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPigJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsPigJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPigJobQueryList) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPigJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobQueryList or *WorkflowTemplateJobsPigJobQueryList", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPigJobQueryList) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPigJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobQueryList", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsPigJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPigJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPigJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobLoggingConfig or *WorkflowTemplateJobsPigJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPigJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPigJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkRJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkRJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkRJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJob or *WorkflowTemplateJobsSparkRJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkRJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkRJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MainRFileUri, actual.MainRFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainRFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkRJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkRJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkRJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkRJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkRJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJobLoggingConfig or *WorkflowTemplateJobsSparkRJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkRJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkRJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkSqlJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkSqlJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkSqlJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJob or *WorkflowTemplateJobsSparkSqlJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkSqlJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkSqlJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkSqlJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkSqlJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ScriptVariables, actual.ScriptVariables, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ScriptVariables")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkSqlJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkSqlJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkSqlJobQueryList) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkSqlJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobQueryList or *WorkflowTemplateJobsSparkSqlJobQueryList", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkSqlJobQueryList) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkSqlJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobQueryList", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkSqlJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkSqlJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkSqlJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobLoggingConfig or *WorkflowTemplateJobsSparkSqlJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkSqlJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkSqlJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsPrestoJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPrestoJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPrestoJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJob or *WorkflowTemplateJobsPrestoJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPrestoJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPrestoJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPrestoJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPrestoJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ContinueOnFailure, actual.ContinueOnFailure, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContinueOnFailure")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.OutputFormat, actual.OutputFormat, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("OutputFormat")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ClientTags, actual.ClientTags, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClientTags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPrestoJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPrestoJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsPrestoJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPrestoJobQueryList) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPrestoJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobQueryList or *WorkflowTemplateJobsPrestoJobQueryList", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPrestoJobQueryList) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPrestoJobQueryList) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobQueryList", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsPrestoJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPrestoJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPrestoJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobLoggingConfig or *WorkflowTemplateJobsPrestoJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPrestoJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPrestoJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsSchedulingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsScheduling) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsScheduling) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsScheduling or *WorkflowTemplateJobsScheduling", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsScheduling) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsScheduling) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsScheduling", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MaxFailuresPerHour, actual.MaxFailuresPerHour, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxFailuresPerHour")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxFailuresTotal, actual.MaxFailuresTotal, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxFailuresTotal")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateParametersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateParameters) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateParameters) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParameters or *WorkflowTemplateParameters", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateParameters) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateParameters) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParameters", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Fields, actual.Fields, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fields")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Validation, actual.Validation, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersValidationNewStyle, EmptyObject: EmptyWorkflowTemplateParametersValidation, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Validation")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateParametersValidationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateParametersValidation) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateParametersValidation) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidation or *WorkflowTemplateParametersValidation", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateParametersValidation) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateParametersValidation) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidation", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Regex, actual.Regex, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersValidationRegexNewStyle, EmptyObject: EmptyWorkflowTemplateParametersValidationRegex, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Regex")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersValidationValuesNewStyle, EmptyObject: EmptyWorkflowTemplateParametersValidationValues, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateParametersValidationRegexNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateParametersValidationRegex) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateParametersValidationRegex) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationRegex or *WorkflowTemplateParametersValidationRegex", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateParametersValidationRegex) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateParametersValidationRegex) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationRegex", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Regexes, actual.Regexes, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Regexes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateParametersValidationValuesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateParametersValidationValues) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateParametersValidationValues) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationValues or *WorkflowTemplateParametersValidationValues", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateParametersValidationValues) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateParametersValidationValues) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationValues", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *WorkflowTemplate) urlNormalized() *WorkflowTemplate { + normalized := dcl.Copy(*r).(WorkflowTemplate) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DagTimeout = dcl.SelfLinkToName(r.DagTimeout) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *WorkflowTemplate) updateURL(userBasePath, updateName string) (string, error) { + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the WorkflowTemplate resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. 
+func (r *WorkflowTemplate) marshal(c *Client) ([]byte, error) { + m, err := expandWorkflowTemplate(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling WorkflowTemplate: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalWorkflowTemplate decodes JSON responses into the WorkflowTemplate resource schema. +func unmarshalWorkflowTemplate(b []byte, c *Client, res *WorkflowTemplate) (*WorkflowTemplate, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapWorkflowTemplate(m, c, res) +} + +func unmarshalMapWorkflowTemplate(m map[string]interface{}, c *Client, res *WorkflowTemplate) (*WorkflowTemplate, error) { + + flattened := flattenWorkflowTemplate(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandWorkflowTemplate expands WorkflowTemplate into a JSON request object. +func expandWorkflowTemplate(c *Client, f *WorkflowTemplate) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v, err := expandWorkflowTemplateEncryptionConfig(c, f.EncryptionConfig, res); err != nil { + return nil, fmt.Errorf("error expanding EncryptionConfig into encryptionConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["encryptionConfig"] = v + } + if v, err := expandWorkflowTemplatePlacement(c, f.Placement, res); err != nil { + return nil, fmt.Errorf("error expanding Placement into placement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["placement"] = v + } + if v, err := expandWorkflowTemplateJobsSlice(c, f.Jobs, res); err != nil { + return nil, fmt.Errorf("error expanding Jobs into jobs: %w", 
err) + } else if v != nil { + m["jobs"] = v + } + if v, err := expandWorkflowTemplateParametersSlice(c, f.Parameters, res); err != nil { + return nil, fmt.Errorf("error expanding Parameters into parameters: %w", err) + } else if v != nil { + m["parameters"] = v + } + if v := f.DagTimeout; dcl.ValueShouldBeSent(v) { + m["dagTimeout"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenWorkflowTemplate flattens WorkflowTemplate from a JSON request object into the +// WorkflowTemplate type. +func flattenWorkflowTemplate(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplate { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &WorkflowTemplate{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Version = dcl.FlattenInteger(m["version"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.EncryptionConfig = flattenWorkflowTemplateEncryptionConfig(c, m["encryptionConfig"], res) + resultRes.Placement = flattenWorkflowTemplatePlacement(c, m["placement"], res) + resultRes.Jobs = flattenWorkflowTemplateJobsSlice(c, m["jobs"], res) + resultRes.Parameters = flattenWorkflowTemplateParametersSlice(c, m["parameters"], res) + resultRes.DagTimeout = dcl.FlattenString(m["dagTimeout"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// expandWorkflowTemplateEncryptionConfigMap expands the contents of 
WorkflowTemplateEncryptionConfig into a JSON +// request object. +func expandWorkflowTemplateEncryptionConfigMap(c *Client, f map[string]WorkflowTemplateEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateEncryptionConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateEncryptionConfigSlice expands the contents of WorkflowTemplateEncryptionConfig into a JSON +// request object. +func expandWorkflowTemplateEncryptionConfigSlice(c *Client, f []WorkflowTemplateEncryptionConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateEncryptionConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateEncryptionConfigMap flattens the contents of WorkflowTemplateEncryptionConfig from a JSON +// response object. +func flattenWorkflowTemplateEncryptionConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateEncryptionConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateEncryptionConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateEncryptionConfig{} + } + + items := make(map[string]WorkflowTemplateEncryptionConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateEncryptionConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateEncryptionConfigSlice flattens the contents of WorkflowTemplateEncryptionConfig from a JSON +// response object. 
+func flattenWorkflowTemplateEncryptionConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateEncryptionConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateEncryptionConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateEncryptionConfig{} + } + + items := make([]WorkflowTemplateEncryptionConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateEncryptionConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateEncryptionConfig expands an instance of WorkflowTemplateEncryptionConfig into a JSON +// request object. +func expandWorkflowTemplateEncryptionConfig(c *Client, f *WorkflowTemplateEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KmsKey; !dcl.IsEmptyValueIndirect(v) { + m["kmsKey"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateEncryptionConfig flattens an instance of WorkflowTemplateEncryptionConfig from a JSON +// response object. +func flattenWorkflowTemplateEncryptionConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateEncryptionConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateEncryptionConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateEncryptionConfig + } + r.KmsKey = dcl.FlattenString(m["kmsKey"]) + + return r +} + +// expandWorkflowTemplatePlacementMap expands the contents of WorkflowTemplatePlacement into a JSON +// request object. 
+func expandWorkflowTemplatePlacementMap(c *Client, f map[string]WorkflowTemplatePlacement, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementSlice expands the contents of WorkflowTemplatePlacement into a JSON +// request object. +func expandWorkflowTemplatePlacementSlice(c *Client, f []WorkflowTemplatePlacement, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementMap flattens the contents of WorkflowTemplatePlacement from a JSON +// response object. +func flattenWorkflowTemplatePlacementMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacement{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacement{} + } + + items := make(map[string]WorkflowTemplatePlacement) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementSlice flattens the contents of WorkflowTemplatePlacement from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacement { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacement{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacement{} + } + + items := make([]WorkflowTemplatePlacement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacement expands an instance of WorkflowTemplatePlacement into a JSON +// request object. +func expandWorkflowTemplatePlacement(c *Client, f *WorkflowTemplatePlacement, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandWorkflowTemplatePlacementManagedCluster(c, f.ManagedCluster, res); err != nil { + return nil, fmt.Errorf("error expanding ManagedCluster into managedCluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["managedCluster"] = v + } + if v, err := expandWorkflowTemplatePlacementClusterSelector(c, f.ClusterSelector, res); err != nil { + return nil, fmt.Errorf("error expanding ClusterSelector into clusterSelector: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["clusterSelector"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacement flattens an instance of WorkflowTemplatePlacement from a JSON +// response object. 
+func flattenWorkflowTemplatePlacement(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacement + } + r.ManagedCluster = flattenWorkflowTemplatePlacementManagedCluster(c, m["managedCluster"], res) + r.ClusterSelector = flattenWorkflowTemplatePlacementClusterSelector(c, m["clusterSelector"], res) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterMap expands the contents of WorkflowTemplatePlacementManagedCluster into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterMap(c *Client, f map[string]WorkflowTemplatePlacementManagedCluster, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedCluster(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterSlice expands the contents of WorkflowTemplatePlacementManagedCluster into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterSlice(c *Client, f []WorkflowTemplatePlacementManagedCluster, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedCluster(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterMap flattens the contents of WorkflowTemplatePlacementManagedCluster from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedCluster { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedCluster{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedCluster{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedCluster) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedCluster(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterSlice flattens the contents of WorkflowTemplatePlacementManagedCluster from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedCluster { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedCluster{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedCluster{} + } + + items := make([]WorkflowTemplatePlacementManagedCluster, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedCluster(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedCluster expands an instance of WorkflowTemplatePlacementManagedCluster into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedCluster(c *Client, f *WorkflowTemplatePlacementManagedCluster, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ClusterName; !dcl.IsEmptyValueIndirect(v) { + m["clusterName"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["config"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedCluster flattens an instance of WorkflowTemplatePlacementManagedCluster from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedCluster(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedCluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedCluster{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedCluster + } + r.ClusterName = dcl.FlattenString(m["clusterName"]) + r.Config = flattenWorkflowTemplatePlacementManagedClusterConfig(c, m["config"], res) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.StagingBucket; !dcl.IsEmptyValueIndirect(v) { + m["configBucket"] = v + } + if v := f.TempBucket; !dcl.IsEmptyValueIndirect(v) { + m["tempBucket"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, f.GceClusterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GceClusterConfig into gceClusterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gceClusterConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, f.MasterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MasterConfig into masterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["masterConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workerConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["secondaryWorkerConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, f.SoftwareConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SoftwareConfig into softwareConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["softwareConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c, f.InitializationActions, res); 
err != nil { + return nil, fmt.Errorf("error expanding InitializationActions into initializationActions: %w", err) + } else if v != nil { + m["initializationActions"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, f.EncryptionConfig, res); err != nil { + return nil, fmt.Errorf("error expanding EncryptionConfig into encryptionConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["encryptionConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, f.AutoscalingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding AutoscalingConfig into autoscalingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscalingConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, f.SecurityConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SecurityConfig into securityConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["securityConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, f.LifecycleConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LifecycleConfig into lifecycleConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["lifecycleConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, f.EndpointConfig, res); err != nil { + return nil, fmt.Errorf("error expanding EndpointConfig into endpointConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["endpointConfig"] = v + } +{{- if ne $.TargetVersionName "ga" }} + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, f.GkeClusterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GkeClusterConfig into gkeClusterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gkeClusterConfig"] = v + } + if v, err := 
expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, f.MetastoreConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MetastoreConfig into metastoreConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["metastoreConfig"] = v + } +{{- end }} + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfig + } + r.StagingBucket = dcl.FlattenString(m["configBucket"]) + r.TempBucket = dcl.FlattenString(m["tempBucket"]) + r.GceClusterConfig = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, m["gceClusterConfig"], res) + r.MasterConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, m["masterConfig"], res) + r.WorkerConfig = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, m["workerConfig"], res) + r.SecondaryWorkerConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, m["secondaryWorkerConfig"], res) + r.SoftwareConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, m["softwareConfig"], res) + r.InitializationActions = flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c, m["initializationActions"], res) + r.EncryptionConfig = flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, m["encryptionConfig"], res) + r.AutoscalingConfig = flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, m["autoscalingConfig"], res) + r.SecurityConfig = 
flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, m["securityConfig"], res) + r.LifecycleConfig = flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, m["lifecycleConfig"], res) + r.EndpointConfig = flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, m["endpointConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.GkeClusterConfig = flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, m["gkeClusterConfig"], res) + r.MetastoreConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, m["metastoreConfig"], res) +{{- end }} + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Zone; !dcl.IsEmptyValueIndirect(v) { + m["zoneUri"] = v + } + if v := f.Network; !dcl.IsEmptyValueIndirect(v) { + m["networkUri"] = v + } + if v := f.Subnetwork; !dcl.IsEmptyValueIndirect(v) { + m["subnetworkUri"] = v + } + if v := f.InternalIPOnly; !dcl.IsEmptyValueIndirect(v) { + m["internalIpOnly"] = v + } + if v := f.PrivateIPv6GoogleAccess; !dcl.IsEmptyValueIndirect(v) { + m["privateIpv6GoogleAccess"] = v + } + if v := f.ServiceAccount; !dcl.IsEmptyValueIndirect(v) { + m["serviceAccount"] = v + } + if v := f.ServiceAccountScopes; v != nil { + m["serviceAccountScopes"] = v + } + if v := f.Tags; v != nil { + m["tags"] = v + } + if v := f.Metadata; !dcl.IsEmptyValueIndirect(v) { + m["metadata"] = v + } + if v, err := 
expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, f.ReservationAffinity, res); err != nil { + return nil, fmt.Errorf("error expanding ReservationAffinity into reservationAffinity: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["reservationAffinity"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, f.NodeGroupAffinity, res); err != nil { + return nil, fmt.Errorf("error expanding NodeGroupAffinity into nodeGroupAffinity: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["nodeGroupAffinity"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, f.ShieldedInstanceConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ShieldedInstanceConfig into shieldedInstanceConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["shieldedInstanceConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + } + r.Zone = dcl.FlattenString(m["zoneUri"]) + r.Network = dcl.FlattenString(m["networkUri"]) + r.Subnetwork = dcl.FlattenString(m["subnetworkUri"]) + r.InternalIPOnly = dcl.FlattenBool(m["internalIpOnly"]) + r.PrivateIPv6GoogleAccess = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(m["privateIpv6GoogleAccess"]) + r.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) + r.ServiceAccountScopes = dcl.FlattenStringSlice(m["serviceAccountScopes"]) + r.Tags = dcl.FlattenStringSlice(m["tags"]) + r.Metadata = dcl.FlattenKeyValuePairs(m["metadata"]) + r.ReservationAffinity = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, m["reservationAffinity"], res) + r.NodeGroupAffinity = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, m["nodeGroupAffinity"], res) + r.ShieldedInstanceConfig = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, m["shieldedInstanceConfig"], res) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ConsumeReservationType; !dcl.IsEmptyValueIndirect(v) { + m["consumeReservationType"] = v + } + if v := f.Key; !dcl.IsEmptyValueIndirect(v) { + m["key"] = v + } + if v := f.Values; v != nil { + m["values"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + } + r.ConsumeReservationType = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(m["consumeReservationType"]) + r.Key = dcl.FlattenString(m["key"]) + r.Values = dcl.FlattenStringSlice(m["values"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NodeGroup; !dcl.IsEmptyValueIndirect(v) { + m["nodeGroupUri"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + } + r.NodeGroup = dcl.FlattenString(m["nodeGroupUri"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableSecureBoot; !dcl.IsEmptyValueIndirect(v) { + m["enableSecureBoot"] = v + } + if v := f.EnableVtpm; !dcl.IsEmptyValueIndirect(v) { + m["enableVtpm"] = v + } + if v := f.EnableIntegrityMonitoring; !dcl.IsEmptyValueIndirect(v) { + m["enableIntegrityMonitoring"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + } + r.EnableSecureBoot = dcl.FlattenBool(m["enableSecureBoot"]) + r.EnableVtpm = dcl.FlattenBool(m["enableVtpm"]) + r.EnableIntegrityMonitoring = dcl.FlattenBool(m["enableIntegrityMonitoring"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { + m["numInstances"] = v + } + if v := f.Image; !dcl.IsEmptyValueIndirect(v) { + m["imageUri"] = v + } + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineTypeUri"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, f.DiskConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["diskConfig"] = v + } + if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { + m["preemptibility"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { + return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) + } else if v != nil { + 
m["accelerators"] = v + } + if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { + m["minCpuPlatform"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig + } + r.NumInstances = dcl.FlattenInteger(m["numInstances"]) + r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) + r.Image = dcl.FlattenString(m["imageUri"]) + r.MachineType = dcl.FlattenString(m["machineTypeUri"]) + r.DiskConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, m["diskConfig"], res) + r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) + r.Preemptibility = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(m["preemptibility"]) + r.ManagedGroupConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, m["managedGroupConfig"], res) + r.Accelerators = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c, m["accelerators"], res) + r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskType"] = v + } + if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskSizeGb"] = v + } + if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { + m["numLocalSsds"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + } + r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) + r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) + r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + } + r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) + r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorTypeUri"] = v + } + if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorCount"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + } + r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) + r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { + m["numInstances"] = v + } + if v := f.Image; !dcl.IsEmptyValueIndirect(v) { + m["imageUri"] = v + } + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineTypeUri"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, f.DiskConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["diskConfig"] = v + } + if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { + m["preemptibility"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { + return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) + } else if v != nil { + 
m["accelerators"] = v + } + if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { + m["minCpuPlatform"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig + } + r.NumInstances = dcl.FlattenInteger(m["numInstances"]) + r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) + r.Image = dcl.FlattenString(m["imageUri"]) + r.MachineType = dcl.FlattenString(m["machineTypeUri"]) + r.DiskConfig = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, m["diskConfig"], res) + r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) + r.Preemptibility = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(m["preemptibility"]) + r.ManagedGroupConfig = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, m["managedGroupConfig"], res) + r.Accelerators = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c, m["accelerators"], res) + r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskType"] = v + } + if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskSizeGb"] = v + } + if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { + m["numLocalSsds"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + } + r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) + r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) + r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + } + r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) + r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorTypeUri"] = v + } + if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorCount"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + } + r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) + r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { + m["numInstances"] = v + } + if v := f.Image; !dcl.IsEmptyValueIndirect(v) { + m["imageUri"] = v + } + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineTypeUri"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, f.DiskConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["diskConfig"] = v + } + if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { + m["preemptibility"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { + 
return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) + } else if v != nil { + m["accelerators"] = v + } + if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { + m["minCpuPlatform"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + } + r.NumInstances = dcl.FlattenInteger(m["numInstances"]) + r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) + r.Image = dcl.FlattenString(m["imageUri"]) + r.MachineType = dcl.FlattenString(m["machineTypeUri"]) + r.DiskConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, m["diskConfig"], res) + r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) + r.Preemptibility = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(m["preemptibility"]) + r.ManagedGroupConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, m["managedGroupConfig"], res) + r.Accelerators = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, m["accelerators"], res) + r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap expands the contents of 
WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskType"] = v + } + if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskSizeGb"] = v + } + if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { + m["numLocalSsds"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + } + r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) + r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) + r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + } + r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) + r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorTypeUri"] = v + } + if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorCount"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + } + r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) + r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ImageVersion; !dcl.IsEmptyValueIndirect(v) { + m["imageVersion"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v := f.OptionalComponents; v != nil { + m["optionalComponents"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + } + r.ImageVersion = dcl.FlattenString(m["imageVersion"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.OptionalComponents = flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c, m["optionalComponents"], res) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigInitializationActions, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigInitializationActions{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigInitializationActions{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions expands an instance of WorkflowTemplatePlacementManagedClusterConfigInitializationActions into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigInitializationActions, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ExecutableFile; !dcl.IsEmptyValueIndirect(v) { + m["executableFile"] = v + } + if v := f.ExecutionTimeout; !dcl.IsEmptyValueIndirect(v) { + m["executionTimeout"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions flattens an instance of WorkflowTemplatePlacementManagedClusterConfigInitializationActions from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions + } + r.ExecutableFile = dcl.FlattenString(m["executableFile"]) + r.ExecutionTimeout = dcl.FlattenString(m["executionTimeout"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.GcePdKmsKeyName; !dcl.IsEmptyValueIndirect(v) { + m["gcePdKmsKeyName"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + } + r.GcePdKmsKeyName = dcl.FlattenString(m["gcePdKmsKeyName"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Policy; !dcl.IsEmptyValueIndirect(v) { + m["policyUri"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + } + r.Policy = dcl.FlattenString(m["policyUri"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, f.KerberosConfig, res); err != nil { + return nil, fmt.Errorf("error expanding KerberosConfig into kerberosConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kerberosConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig + } + r.KerberosConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, m["kerberosConfig"], res) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableKerberos; !dcl.IsEmptyValueIndirect(v) { + m["enableKerberos"] = v + } + if v := f.RootPrincipalPassword; !dcl.IsEmptyValueIndirect(v) { + m["rootPrincipalPasswordUri"] = v + } + if v := f.KmsKey; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyUri"] = v + } + if v := f.Keystore; !dcl.IsEmptyValueIndirect(v) { + m["keystoreUri"] = v + } + if v := f.Truststore; !dcl.IsEmptyValueIndirect(v) { + m["truststoreUri"] = v + } + if v := f.KeystorePassword; !dcl.IsEmptyValueIndirect(v) { + m["keystorePasswordUri"] = v + } + if v := f.KeyPassword; !dcl.IsEmptyValueIndirect(v) { + m["keyPasswordUri"] = v + } + if v := f.TruststorePassword; !dcl.IsEmptyValueIndirect(v) { + 
m["truststorePasswordUri"] = v + } + if v := f.CrossRealmTrustRealm; !dcl.IsEmptyValueIndirect(v) { + m["crossRealmTrustRealm"] = v + } + if v := f.CrossRealmTrustKdc; !dcl.IsEmptyValueIndirect(v) { + m["crossRealmTrustKdc"] = v + } + if v := f.CrossRealmTrustAdminServer; !dcl.IsEmptyValueIndirect(v) { + m["crossRealmTrustAdminServer"] = v + } + if v := f.CrossRealmTrustSharedPassword; !dcl.IsEmptyValueIndirect(v) { + m["crossRealmTrustSharedPasswordUri"] = v + } + if v := f.KdcDbKey; !dcl.IsEmptyValueIndirect(v) { + m["kdcDbKeyUri"] = v + } + if v := f.TgtLifetimeHours; !dcl.IsEmptyValueIndirect(v) { + m["tgtLifetimeHours"] = v + } + if v := f.Realm; !dcl.IsEmptyValueIndirect(v) { + m["realm"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig + } + r.EnableKerberos = dcl.FlattenBool(m["enableKerberos"]) + r.RootPrincipalPassword = dcl.FlattenString(m["rootPrincipalPasswordUri"]) + r.KmsKey = dcl.FlattenString(m["kmsKeyUri"]) + r.Keystore = dcl.FlattenString(m["keystoreUri"]) + r.Truststore = dcl.FlattenString(m["truststoreUri"]) + r.KeystorePassword = dcl.FlattenString(m["keystorePasswordUri"]) + r.KeyPassword = dcl.FlattenString(m["keyPasswordUri"]) + r.TruststorePassword = dcl.FlattenString(m["truststorePasswordUri"]) + r.CrossRealmTrustRealm = dcl.FlattenString(m["crossRealmTrustRealm"]) 
+ r.CrossRealmTrustKdc = dcl.FlattenString(m["crossRealmTrustKdc"]) + r.CrossRealmTrustAdminServer = dcl.FlattenString(m["crossRealmTrustAdminServer"]) + r.CrossRealmTrustSharedPassword = dcl.FlattenString(m["crossRealmTrustSharedPasswordUri"]) + r.KdcDbKey = dcl.FlattenString(m["kdcDbKeyUri"]) + r.TgtLifetimeHours = dcl.FlattenInteger(m["tgtLifetimeHours"]) + r.Realm = dcl.FlattenString(m["realm"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.IdleDeleteTtl; !dcl.IsEmptyValueIndirect(v) { + m["idleDeleteTtl"] = v + } + if v := f.AutoDeleteTime; !dcl.IsEmptyValueIndirect(v) { + m["autoDeleteTime"] = v + } + if v := f.AutoDeleteTtl; !dcl.IsEmptyValueIndirect(v) { + m["autoDeleteTtl"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig + } + r.IdleDeleteTtl = dcl.FlattenString(m["idleDeleteTtl"]) + r.AutoDeleteTime = dcl.FlattenString(m["autoDeleteTime"]) + r.AutoDeleteTtl = dcl.FlattenString(m["autoDeleteTtl"]) + r.IdleStartTime = dcl.FlattenString(m["idleStartTime"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableHttpPortAccess; !dcl.IsEmptyValueIndirect(v) { + m["enableHttpPortAccess"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig + } + r.HttpPorts = dcl.FlattenKeyValuePairs(m["httpPorts"]) + r.EnableHttpPortAccess = dcl.FlattenBool(m["enableHttpPortAccess"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, f.NamespacedGkeDeploymentTarget, res); err != nil { + return nil, fmt.Errorf("error expanding NamespacedGkeDeploymentTarget into namespacedGkeDeploymentTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["namespacedGkeDeploymentTarget"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig + } + r.NamespacedGkeDeploymentTarget = flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, m["namespacedGkeDeploymentTarget"], res) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON +// response 
object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget expands an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TargetGkeCluster; !dcl.IsEmptyValueIndirect(v) { + m["targetGkeCluster"] = v + } + if v := f.ClusterNamespace; !dcl.IsEmptyValueIndirect(v) { + m["clusterNamespace"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + } + r.TargetGkeCluster = dcl.FlattenString(m["targetGkeCluster"]) + r.ClusterNamespace = dcl.FlattenString(m["clusterNamespace"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DataprocMetastoreService; !dcl.IsEmptyValueIndirect(v) { + m["dataprocMetastoreService"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig + } + r.DataprocMetastoreService = dcl.FlattenString(m["dataprocMetastoreService"]) + + return r +} + +{{- end }} +// expandWorkflowTemplatePlacementClusterSelectorMap expands the contents of WorkflowTemplatePlacementClusterSelector into a JSON +// request object. +func expandWorkflowTemplatePlacementClusterSelectorMap(c *Client, f map[string]WorkflowTemplatePlacementClusterSelector, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementClusterSelector(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementClusterSelectorSlice expands the contents of WorkflowTemplatePlacementClusterSelector into a JSON +// request object. +func expandWorkflowTemplatePlacementClusterSelectorSlice(c *Client, f []WorkflowTemplatePlacementClusterSelector, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementClusterSelector(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementClusterSelectorMap flattens the contents of WorkflowTemplatePlacementClusterSelector from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementClusterSelectorMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementClusterSelector { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementClusterSelector{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementClusterSelector{} + } + + items := make(map[string]WorkflowTemplatePlacementClusterSelector) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementClusterSelector(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementClusterSelectorSlice flattens the contents of WorkflowTemplatePlacementClusterSelector from a JSON +// response object. +func flattenWorkflowTemplatePlacementClusterSelectorSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementClusterSelector { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementClusterSelector{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementClusterSelector{} + } + + items := make([]WorkflowTemplatePlacementClusterSelector, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementClusterSelector(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementClusterSelector expands an instance of WorkflowTemplatePlacementClusterSelector into a JSON +// request object. 
+func expandWorkflowTemplatePlacementClusterSelector(c *Client, f *WorkflowTemplatePlacementClusterSelector, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Zone; !dcl.IsEmptyValueIndirect(v) { + m["zone"] = v + } + if v := f.ClusterLabels; !dcl.IsEmptyValueIndirect(v) { + m["clusterLabels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementClusterSelector flattens an instance of WorkflowTemplatePlacementClusterSelector from a JSON +// response object. +func flattenWorkflowTemplatePlacementClusterSelector(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementClusterSelector { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementClusterSelector{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementClusterSelector + } + r.Zone = dcl.FlattenString(m["zone"]) + r.ClusterLabels = dcl.FlattenKeyValuePairs(m["clusterLabels"]) + + return r +} + +// expandWorkflowTemplateJobsMap expands the contents of WorkflowTemplateJobs into a JSON +// request object. +func expandWorkflowTemplateJobsMap(c *Client, f map[string]WorkflowTemplateJobs, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobs(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSlice expands the contents of WorkflowTemplateJobs into a JSON +// request object. 
+func expandWorkflowTemplateJobsSlice(c *Client, f []WorkflowTemplateJobs, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobs(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsMap flattens the contents of WorkflowTemplateJobs from a JSON +// response object. +func flattenWorkflowTemplateJobsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobs { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobs{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobs{} + } + + items := make(map[string]WorkflowTemplateJobs) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobs(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSlice flattens the contents of WorkflowTemplateJobs from a JSON +// response object. +func flattenWorkflowTemplateJobsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobs { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobs{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobs{} + } + + items := make([]WorkflowTemplateJobs, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobs(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobs expands an instance of WorkflowTemplateJobs into a JSON +// request object. 
+func expandWorkflowTemplateJobs(c *Client, f *WorkflowTemplateJobs, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.StepId; !dcl.IsEmptyValueIndirect(v) { + m["stepId"] = v + } + if v, err := expandWorkflowTemplateJobsHadoopJob(c, f.HadoopJob, res); err != nil { + return nil, fmt.Errorf("error expanding HadoopJob into hadoopJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["hadoopJob"] = v + } + if v, err := expandWorkflowTemplateJobsSparkJob(c, f.SparkJob, res); err != nil { + return nil, fmt.Errorf("error expanding SparkJob into sparkJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sparkJob"] = v + } + if v, err := expandWorkflowTemplateJobsPysparkJob(c, f.PysparkJob, res); err != nil { + return nil, fmt.Errorf("error expanding PysparkJob into pysparkJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["pysparkJob"] = v + } + if v, err := expandWorkflowTemplateJobsHiveJob(c, f.HiveJob, res); err != nil { + return nil, fmt.Errorf("error expanding HiveJob into hiveJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["hiveJob"] = v + } + if v, err := expandWorkflowTemplateJobsPigJob(c, f.PigJob, res); err != nil { + return nil, fmt.Errorf("error expanding PigJob into pigJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["pigJob"] = v + } + if v, err := expandWorkflowTemplateJobsSparkRJob(c, f.SparkRJob, res); err != nil { + return nil, fmt.Errorf("error expanding SparkRJob into sparkRJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sparkRJob"] = v + } + if v, err := expandWorkflowTemplateJobsSparkSqlJob(c, f.SparkSqlJob, res); err != nil { + return nil, fmt.Errorf("error expanding SparkSqlJob into sparkSqlJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sparkSqlJob"] = v + } + if v, err := expandWorkflowTemplateJobsPrestoJob(c, f.PrestoJob, res); err != nil { + return nil, 
fmt.Errorf("error expanding PrestoJob into prestoJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["prestoJob"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + if v, err := expandWorkflowTemplateJobsScheduling(c, f.Scheduling, res); err != nil { + return nil, fmt.Errorf("error expanding Scheduling into scheduling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["scheduling"] = v + } + if v := f.PrerequisiteStepIds; v != nil { + m["prerequisiteStepIds"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobs flattens an instance of WorkflowTemplateJobs from a JSON +// response object. +func flattenWorkflowTemplateJobs(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobs { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobs{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobs + } + r.StepId = dcl.FlattenString(m["stepId"]) + r.HadoopJob = flattenWorkflowTemplateJobsHadoopJob(c, m["hadoopJob"], res) + r.SparkJob = flattenWorkflowTemplateJobsSparkJob(c, m["sparkJob"], res) + r.PysparkJob = flattenWorkflowTemplateJobsPysparkJob(c, m["pysparkJob"], res) + r.HiveJob = flattenWorkflowTemplateJobsHiveJob(c, m["hiveJob"], res) + r.PigJob = flattenWorkflowTemplateJobsPigJob(c, m["pigJob"], res) + r.SparkRJob = flattenWorkflowTemplateJobsSparkRJob(c, m["sparkRJob"], res) + r.SparkSqlJob = flattenWorkflowTemplateJobsSparkSqlJob(c, m["sparkSqlJob"], res) + r.PrestoJob = flattenWorkflowTemplateJobsPrestoJob(c, m["prestoJob"], res) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + r.Scheduling = flattenWorkflowTemplateJobsScheduling(c, m["scheduling"], res) + r.PrerequisiteStepIds = dcl.FlattenStringSlice(m["prerequisiteStepIds"]) + + return r +} + +// expandWorkflowTemplateJobsHadoopJobMap expands the contents of WorkflowTemplateJobsHadoopJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsHadoopJobMap(c *Client, f map[string]WorkflowTemplateJobsHadoopJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsHadoopJobSlice expands the contents of WorkflowTemplateJobsHadoopJob into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJobSlice(c *Client, f []WorkflowTemplateJobsHadoopJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsHadoopJobMap flattens the contents of WorkflowTemplateJobsHadoopJob from a JSON +// response object. +func flattenWorkflowTemplateJobsHadoopJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHadoopJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsHadoopJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsHadoopJob{} + } + + items := make(map[string]WorkflowTemplateJobsHadoopJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsHadoopJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsHadoopJobSlice flattens the contents of WorkflowTemplateJobsHadoopJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHadoopJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHadoopJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsHadoopJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsHadoopJob{} + } + + items := make([]WorkflowTemplateJobsHadoopJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsHadoopJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsHadoopJob expands an instance of WorkflowTemplateJobsHadoopJob into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJob(c *Client, f *WorkflowTemplateJobsHadoopJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MainJarFileUri; !dcl.IsEmptyValueIndirect(v) { + m["mainJarFileUri"] = v + } + if v := f.MainClass; !dcl.IsEmptyValueIndirect(v) { + m["mainClass"] = v + } + if v := f.Args; v != nil { + m["args"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v := f.FileUris; v != nil { + m["fileUris"] = v + } + if v := f.ArchiveUris; v != nil { + m["archiveUris"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsHadoopJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsHadoopJob flattens an instance of WorkflowTemplateJobsHadoopJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHadoopJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHadoopJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsHadoopJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsHadoopJob + } + r.MainJarFileUri = dcl.FlattenString(m["mainJarFileUri"]) + r.MainClass = dcl.FlattenString(m["mainClass"]) + r.Args = dcl.FlattenStringSlice(m["args"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + r.FileUris = dcl.FlattenStringSlice(m["fileUris"]) + r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.LoggingConfig = flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsHadoopJobLoggingConfigMap expands the contents of WorkflowTemplateJobsHadoopJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsHadoopJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsHadoopJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsHadoopJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsHadoopJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsHadoopJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsHadoopJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsHadoopJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsHadoopJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHadoopJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsHadoopJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsHadoopJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsHadoopJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHadoopJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHadoopJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsHadoopJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsHadoopJobLoggingConfig expands an instance of WorkflowTemplateJobsHadoopJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJobLoggingConfig(c *Client, f *WorkflowTemplateJobsHadoopJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsHadoopJobLoggingConfig flattens an instance of WorkflowTemplateJobsHadoopJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHadoopJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsHadoopJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsSparkJobMap expands the contents of WorkflowTemplateJobsSparkJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkJobMap(c *Client, f map[string]WorkflowTemplateJobsSparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkJobSlice expands the contents of WorkflowTemplateJobsSparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJobSlice(c *Client, f []WorkflowTemplateJobsSparkJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkJobMap flattens the contents of WorkflowTemplateJobsSparkJob from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkJob{} + } + + items := make(map[string]WorkflowTemplateJobsSparkJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkJobSlice flattens the contents of WorkflowTemplateJobsSparkJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkJob{} + } + + items := make([]WorkflowTemplateJobsSparkJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkJob expands an instance of WorkflowTemplateJobsSparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJob(c *Client, f *WorkflowTemplateJobsSparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MainJarFileUri; !dcl.IsEmptyValueIndirect(v) { + m["mainJarFileUri"] = v + } + if v := f.MainClass; !dcl.IsEmptyValueIndirect(v) { + m["mainClass"] = v + } + if v := f.Args; v != nil { + m["args"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v := f.FileUris; v != nil { + m["fileUris"] = v + } + if v := f.ArchiveUris; v != nil { + m["archiveUris"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsSparkJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkJob flattens an instance of WorkflowTemplateJobsSparkJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkJob + } + r.MainJarFileUri = dcl.FlattenString(m["mainJarFileUri"]) + r.MainClass = dcl.FlattenString(m["mainClass"]) + r.Args = dcl.FlattenStringSlice(m["args"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + r.FileUris = dcl.FlattenStringSlice(m["fileUris"]) + r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.LoggingConfig = flattenWorkflowTemplateJobsSparkJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsSparkJobLoggingConfigMap expands the contents of WorkflowTemplateJobsSparkJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsSparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsSparkJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsSparkJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsSparkJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsSparkJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsSparkJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsSparkJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkJobLoggingConfig expands an instance of WorkflowTemplateJobsSparkJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJobLoggingConfig(c *Client, f *WorkflowTemplateJobsSparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkJobLoggingConfig flattens an instance of WorkflowTemplateJobsSparkJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsPysparkJobMap expands the contents of WorkflowTemplateJobsPysparkJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsPysparkJobMap(c *Client, f map[string]WorkflowTemplateJobsPysparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPysparkJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPysparkJobSlice expands the contents of WorkflowTemplateJobsPysparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsPysparkJobSlice(c *Client, f []WorkflowTemplateJobsPysparkJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPysparkJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPysparkJobMap flattens the contents of WorkflowTemplateJobsPysparkJob from a JSON +// response object. +func flattenWorkflowTemplateJobsPysparkJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPysparkJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPysparkJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPysparkJob{} + } + + items := make(map[string]WorkflowTemplateJobsPysparkJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPysparkJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPysparkJobSlice flattens the contents of WorkflowTemplateJobsPysparkJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPysparkJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPysparkJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPysparkJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPysparkJob{} + } + + items := make([]WorkflowTemplateJobsPysparkJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPysparkJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPysparkJob expands an instance of WorkflowTemplateJobsPysparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsPysparkJob(c *Client, f *WorkflowTemplateJobsPysparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MainPythonFileUri; !dcl.IsEmptyValueIndirect(v) { + m["mainPythonFileUri"] = v + } + if v := f.Args; v != nil { + m["args"] = v + } + if v := f.PythonFileUris; v != nil { + m["pythonFileUris"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v := f.FileUris; v != nil { + m["fileUris"] = v + } + if v := f.ArchiveUris; v != nil { + m["archiveUris"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsPysparkJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPysparkJob flattens an instance of WorkflowTemplateJobsPysparkJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPysparkJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPysparkJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPysparkJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPysparkJob + } + r.MainPythonFileUri = dcl.FlattenString(m["mainPythonFileUri"]) + r.Args = dcl.FlattenStringSlice(m["args"]) + r.PythonFileUris = dcl.FlattenStringSlice(m["pythonFileUris"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + r.FileUris = dcl.FlattenStringSlice(m["fileUris"]) + r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.LoggingConfig = flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsPysparkJobLoggingConfigMap expands the contents of WorkflowTemplateJobsPysparkJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsPysparkJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsPysparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPysparkJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPysparkJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsPysparkJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsPysparkJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsPysparkJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPysparkJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPysparkJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsPysparkJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsPysparkJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPysparkJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsPysparkJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPysparkJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsPysparkJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPysparkJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPysparkJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsPysparkJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPysparkJobLoggingConfig expands an instance of WorkflowTemplateJobsPysparkJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsPysparkJobLoggingConfig(c *Client, f *WorkflowTemplateJobsPysparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPysparkJobLoggingConfig flattens an instance of WorkflowTemplateJobsPysparkJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPysparkJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPysparkJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsHiveJobMap expands the contents of WorkflowTemplateJobsHiveJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsHiveJobMap(c *Client, f map[string]WorkflowTemplateJobsHiveJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsHiveJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsHiveJobSlice expands the contents of WorkflowTemplateJobsHiveJob into a JSON +// request object. +func expandWorkflowTemplateJobsHiveJobSlice(c *Client, f []WorkflowTemplateJobsHiveJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsHiveJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsHiveJobMap flattens the contents of WorkflowTemplateJobsHiveJob from a JSON +// response object. +func flattenWorkflowTemplateJobsHiveJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHiveJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsHiveJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsHiveJob{} + } + + items := make(map[string]WorkflowTemplateJobsHiveJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsHiveJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsHiveJobSlice flattens the contents of WorkflowTemplateJobsHiveJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHiveJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHiveJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsHiveJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsHiveJob{} + } + + items := make([]WorkflowTemplateJobsHiveJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsHiveJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsHiveJob expands an instance of WorkflowTemplateJobsHiveJob into a JSON +// request object. +func expandWorkflowTemplateJobsHiveJob(c *Client, f *WorkflowTemplateJobsHiveJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) { + m["queryFileUri"] = v + } + if v, err := expandWorkflowTemplateJobsHiveJobQueryList(c, f.QueryList, res); err != nil { + return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["queryList"] = v + } + if v := f.ContinueOnFailure; !dcl.IsEmptyValueIndirect(v) { + m["continueOnFailure"] = v + } + if v := f.ScriptVariables; !dcl.IsEmptyValueIndirect(v) { + m["scriptVariables"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsHiveJob flattens an instance of WorkflowTemplateJobsHiveJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHiveJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHiveJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsHiveJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsHiveJob + } + r.QueryFileUri = dcl.FlattenString(m["queryFileUri"]) + r.QueryList = flattenWorkflowTemplateJobsHiveJobQueryList(c, m["queryList"], res) + r.ContinueOnFailure = dcl.FlattenBool(m["continueOnFailure"]) + r.ScriptVariables = dcl.FlattenKeyValuePairs(m["scriptVariables"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + + return r +} + +// expandWorkflowTemplateJobsHiveJobQueryListMap expands the contents of WorkflowTemplateJobsHiveJobQueryList into a JSON +// request object. +func expandWorkflowTemplateJobsHiveJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsHiveJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsHiveJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsHiveJobQueryListSlice expands the contents of WorkflowTemplateJobsHiveJobQueryList into a JSON +// request object. 
+func expandWorkflowTemplateJobsHiveJobQueryListSlice(c *Client, f []WorkflowTemplateJobsHiveJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsHiveJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsHiveJobQueryListMap flattens the contents of WorkflowTemplateJobsHiveJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsHiveJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHiveJobQueryList { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsHiveJobQueryList{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsHiveJobQueryList{} + } + + items := make(map[string]WorkflowTemplateJobsHiveJobQueryList) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsHiveJobQueryList(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsHiveJobQueryListSlice flattens the contents of WorkflowTemplateJobsHiveJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsHiveJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHiveJobQueryList { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsHiveJobQueryList{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsHiveJobQueryList{} + } + + items := make([]WorkflowTemplateJobsHiveJobQueryList, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsHiveJobQueryList(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsHiveJobQueryList expands an instance of WorkflowTemplateJobsHiveJobQueryList into a JSON +// request object. 
+func expandWorkflowTemplateJobsHiveJobQueryList(c *Client, f *WorkflowTemplateJobsHiveJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Queries; v != nil { + m["queries"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsHiveJobQueryList flattens an instance of WorkflowTemplateJobsHiveJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsHiveJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHiveJobQueryList { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsHiveJobQueryList{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsHiveJobQueryList + } + r.Queries = dcl.FlattenStringSlice(m["queries"]) + + return r +} + +// expandWorkflowTemplateJobsPigJobMap expands the contents of WorkflowTemplateJobsPigJob into a JSON +// request object. +func expandWorkflowTemplateJobsPigJobMap(c *Client, f map[string]WorkflowTemplateJobsPigJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPigJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPigJobSlice expands the contents of WorkflowTemplateJobsPigJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsPigJobSlice(c *Client, f []WorkflowTemplateJobsPigJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPigJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPigJobMap flattens the contents of WorkflowTemplateJobsPigJob from a JSON +// response object. +func flattenWorkflowTemplateJobsPigJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPigJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPigJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPigJob{} + } + + items := make(map[string]WorkflowTemplateJobsPigJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPigJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPigJobSlice flattens the contents of WorkflowTemplateJobsPigJob from a JSON +// response object. +func flattenWorkflowTemplateJobsPigJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPigJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPigJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPigJob{} + } + + items := make([]WorkflowTemplateJobsPigJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPigJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPigJob expands an instance of WorkflowTemplateJobsPigJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsPigJob(c *Client, f *WorkflowTemplateJobsPigJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) { + m["queryFileUri"] = v + } + if v, err := expandWorkflowTemplateJobsPigJobQueryList(c, f.QueryList, res); err != nil { + return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["queryList"] = v + } + if v := f.ContinueOnFailure; !dcl.IsEmptyValueIndirect(v) { + m["continueOnFailure"] = v + } + if v := f.ScriptVariables; !dcl.IsEmptyValueIndirect(v) { + m["scriptVariables"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v, err := expandWorkflowTemplateJobsPigJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPigJob flattens an instance of WorkflowTemplateJobsPigJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPigJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPigJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPigJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPigJob + } + r.QueryFileUri = dcl.FlattenString(m["queryFileUri"]) + r.QueryList = flattenWorkflowTemplateJobsPigJobQueryList(c, m["queryList"], res) + r.ContinueOnFailure = dcl.FlattenBool(m["continueOnFailure"]) + r.ScriptVariables = dcl.FlattenKeyValuePairs(m["scriptVariables"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + r.LoggingConfig = flattenWorkflowTemplateJobsPigJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsPigJobQueryListMap expands the contents of WorkflowTemplateJobsPigJobQueryList into a JSON +// request object. +func expandWorkflowTemplateJobsPigJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsPigJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPigJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPigJobQueryListSlice expands the contents of WorkflowTemplateJobsPigJobQueryList into a JSON +// request object. 
+func expandWorkflowTemplateJobsPigJobQueryListSlice(c *Client, f []WorkflowTemplateJobsPigJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPigJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPigJobQueryListMap flattens the contents of WorkflowTemplateJobsPigJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsPigJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPigJobQueryList { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPigJobQueryList{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPigJobQueryList{} + } + + items := make(map[string]WorkflowTemplateJobsPigJobQueryList) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPigJobQueryList(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPigJobQueryListSlice flattens the contents of WorkflowTemplateJobsPigJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsPigJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPigJobQueryList { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPigJobQueryList{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPigJobQueryList{} + } + + items := make([]WorkflowTemplateJobsPigJobQueryList, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPigJobQueryList(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPigJobQueryList expands an instance of WorkflowTemplateJobsPigJobQueryList into a JSON +// request object. 
+func expandWorkflowTemplateJobsPigJobQueryList(c *Client, f *WorkflowTemplateJobsPigJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Queries; v != nil { + m["queries"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPigJobQueryList flattens an instance of WorkflowTemplateJobsPigJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsPigJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPigJobQueryList { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPigJobQueryList{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPigJobQueryList + } + r.Queries = dcl.FlattenStringSlice(m["queries"]) + + return r +} + +// expandWorkflowTemplateJobsPigJobLoggingConfigMap expands the contents of WorkflowTemplateJobsPigJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsPigJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsPigJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPigJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPigJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsPigJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsPigJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsPigJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPigJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPigJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsPigJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsPigJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPigJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPigJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPigJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsPigJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPigJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPigJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsPigJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPigJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPigJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPigJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPigJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsPigJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPigJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPigJobLoggingConfig expands an instance of WorkflowTemplateJobsPigJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsPigJobLoggingConfig(c *Client, f *WorkflowTemplateJobsPigJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPigJobLoggingConfig flattens an instance of WorkflowTemplateJobsPigJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsPigJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPigJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPigJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPigJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsSparkRJobMap expands the contents of WorkflowTemplateJobsSparkRJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkRJobMap(c *Client, f map[string]WorkflowTemplateJobsSparkRJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkRJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkRJobSlice expands the contents of WorkflowTemplateJobsSparkRJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkRJobSlice(c *Client, f []WorkflowTemplateJobsSparkRJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkRJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkRJobMap flattens the contents of WorkflowTemplateJobsSparkRJob from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkRJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkRJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkRJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkRJob{} + } + + items := make(map[string]WorkflowTemplateJobsSparkRJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkRJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkRJobSlice flattens the contents of WorkflowTemplateJobsSparkRJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkRJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkRJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkRJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkRJob{} + } + + items := make([]WorkflowTemplateJobsSparkRJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkRJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkRJob expands an instance of WorkflowTemplateJobsSparkRJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkRJob(c *Client, f *WorkflowTemplateJobsSparkRJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MainRFileUri; !dcl.IsEmptyValueIndirect(v) { + m["mainRFileUri"] = v + } + if v := f.Args; v != nil { + m["args"] = v + } + if v := f.FileUris; v != nil { + m["fileUris"] = v + } + if v := f.ArchiveUris; v != nil { + m["archiveUris"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsSparkRJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkRJob flattens an instance of WorkflowTemplateJobsSparkRJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkRJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkRJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkRJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkRJob + } + r.MainRFileUri = dcl.FlattenString(m["mainRFileUri"]) + r.Args = dcl.FlattenStringSlice(m["args"]) + r.FileUris = dcl.FlattenStringSlice(m["fileUris"]) + r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.LoggingConfig = flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsSparkRJobLoggingConfigMap expands the contents of WorkflowTemplateJobsSparkRJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsSparkRJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsSparkRJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkRJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkRJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsSparkRJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkRJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsSparkRJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkRJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkRJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsSparkRJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkRJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkRJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsSparkRJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkRJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsSparkRJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkRJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkRJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsSparkRJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkRJobLoggingConfig expands an instance of WorkflowTemplateJobsSparkRJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsSparkRJobLoggingConfig(c *Client, f *WorkflowTemplateJobsSparkRJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkRJobLoggingConfig flattens an instance of WorkflowTemplateJobsSparkRJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkRJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkRJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsSparkSqlJobMap expands the contents of WorkflowTemplateJobsSparkSqlJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkSqlJobMap(c *Client, f map[string]WorkflowTemplateJobsSparkSqlJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkSqlJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkSqlJobSlice expands the contents of WorkflowTemplateJobsSparkSqlJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkSqlJobSlice(c *Client, f []WorkflowTemplateJobsSparkSqlJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkSqlJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkSqlJobMap flattens the contents of WorkflowTemplateJobsSparkSqlJob from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkSqlJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkSqlJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkSqlJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkSqlJob{} + } + + items := make(map[string]WorkflowTemplateJobsSparkSqlJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkSqlJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkSqlJobSlice flattens the contents of WorkflowTemplateJobsSparkSqlJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkSqlJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkSqlJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkSqlJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkSqlJob{} + } + + items := make([]WorkflowTemplateJobsSparkSqlJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkSqlJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkSqlJob expands an instance of WorkflowTemplateJobsSparkSqlJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkSqlJob(c *Client, f *WorkflowTemplateJobsSparkSqlJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) { + m["queryFileUri"] = v + } + if v, err := expandWorkflowTemplateJobsSparkSqlJobQueryList(c, f.QueryList, res); err != nil { + return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["queryList"] = v + } + if v := f.ScriptVariables; !dcl.IsEmptyValueIndirect(v) { + m["scriptVariables"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v, err := expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkSqlJob flattens an instance of WorkflowTemplateJobsSparkSqlJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkSqlJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkSqlJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkSqlJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkSqlJob + } + r.QueryFileUri = dcl.FlattenString(m["queryFileUri"]) + r.QueryList = flattenWorkflowTemplateJobsSparkSqlJobQueryList(c, m["queryList"], res) + r.ScriptVariables = dcl.FlattenKeyValuePairs(m["scriptVariables"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + r.LoggingConfig = flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsSparkSqlJobQueryListMap expands the contents of WorkflowTemplateJobsSparkSqlJobQueryList into a JSON +// request object. +func expandWorkflowTemplateJobsSparkSqlJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsSparkSqlJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkSqlJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkSqlJobQueryListSlice expands the contents of WorkflowTemplateJobsSparkSqlJobQueryList into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkSqlJobQueryListSlice(c *Client, f []WorkflowTemplateJobsSparkSqlJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkSqlJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkSqlJobQueryListMap flattens the contents of WorkflowTemplateJobsSparkSqlJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkSqlJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkSqlJobQueryList { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkSqlJobQueryList{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkSqlJobQueryList{} + } + + items := make(map[string]WorkflowTemplateJobsSparkSqlJobQueryList) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkSqlJobQueryList(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkSqlJobQueryListSlice flattens the contents of WorkflowTemplateJobsSparkSqlJobQueryList from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkSqlJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkSqlJobQueryList { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkSqlJobQueryList{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkSqlJobQueryList{} + } + + items := make([]WorkflowTemplateJobsSparkSqlJobQueryList, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkSqlJobQueryList(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkSqlJobQueryList expands an instance of WorkflowTemplateJobsSparkSqlJobQueryList into a JSON +// request object. +func expandWorkflowTemplateJobsSparkSqlJobQueryList(c *Client, f *WorkflowTemplateJobsSparkSqlJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Queries; v != nil { + m["queries"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkSqlJobQueryList flattens an instance of WorkflowTemplateJobsSparkSqlJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkSqlJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkSqlJobQueryList { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkSqlJobQueryList{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + r.Queries = dcl.FlattenStringSlice(m["queries"]) + + return r +} + +// expandWorkflowTemplateJobsSparkSqlJobLoggingConfigMap expands the contents of WorkflowTemplateJobsSparkSqlJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkSqlJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsSparkSqlJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsSparkSqlJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsSparkSqlJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsSparkSqlJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkSqlJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsSparkSqlJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkSqlJobLoggingConfig expands an instance of WorkflowTemplateJobsSparkSqlJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c *Client, f *WorkflowTemplateJobsSparkSqlJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig flattens an instance of WorkflowTemplateJobsSparkSqlJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkSqlJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsPrestoJobMap expands the contents of WorkflowTemplateJobsPrestoJob into a JSON +// request object. +func expandWorkflowTemplateJobsPrestoJobMap(c *Client, f map[string]WorkflowTemplateJobsPrestoJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPrestoJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPrestoJobSlice expands the contents of WorkflowTemplateJobsPrestoJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsPrestoJobSlice(c *Client, f []WorkflowTemplateJobsPrestoJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPrestoJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPrestoJobMap flattens the contents of WorkflowTemplateJobsPrestoJob from a JSON +// response object. +func flattenWorkflowTemplateJobsPrestoJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPrestoJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPrestoJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPrestoJob{} + } + + items := make(map[string]WorkflowTemplateJobsPrestoJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPrestoJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPrestoJobSlice flattens the contents of WorkflowTemplateJobsPrestoJob from a JSON +// response object. +func flattenWorkflowTemplateJobsPrestoJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPrestoJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPrestoJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPrestoJob{} + } + + items := make([]WorkflowTemplateJobsPrestoJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPrestoJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPrestoJob expands an instance of WorkflowTemplateJobsPrestoJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsPrestoJob(c *Client, f *WorkflowTemplateJobsPrestoJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) { + m["queryFileUri"] = v + } + if v, err := expandWorkflowTemplateJobsPrestoJobQueryList(c, f.QueryList, res); err != nil { + return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["queryList"] = v + } + if v := f.ContinueOnFailure; !dcl.IsEmptyValueIndirect(v) { + m["continueOnFailure"] = v + } + if v := f.OutputFormat; !dcl.IsEmptyValueIndirect(v) { + m["outputFormat"] = v + } + if v := f.ClientTags; v != nil { + m["clientTags"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsPrestoJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPrestoJob flattens an instance of WorkflowTemplateJobsPrestoJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPrestoJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPrestoJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPrestoJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPrestoJob + } + r.QueryFileUri = dcl.FlattenString(m["queryFileUri"]) + r.QueryList = flattenWorkflowTemplateJobsPrestoJobQueryList(c, m["queryList"], res) + r.ContinueOnFailure = dcl.FlattenBool(m["continueOnFailure"]) + r.OutputFormat = dcl.FlattenString(m["outputFormat"]) + r.ClientTags = dcl.FlattenStringSlice(m["clientTags"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.LoggingConfig = flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsPrestoJobQueryListMap expands the contents of WorkflowTemplateJobsPrestoJobQueryList into a JSON +// request object. +func expandWorkflowTemplateJobsPrestoJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsPrestoJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPrestoJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPrestoJobQueryListSlice expands the contents of WorkflowTemplateJobsPrestoJobQueryList into a JSON +// request object. 
+func expandWorkflowTemplateJobsPrestoJobQueryListSlice(c *Client, f []WorkflowTemplateJobsPrestoJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPrestoJobQueryList(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPrestoJobQueryListMap flattens the contents of WorkflowTemplateJobsPrestoJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsPrestoJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPrestoJobQueryList { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPrestoJobQueryList{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPrestoJobQueryList{} + } + + items := make(map[string]WorkflowTemplateJobsPrestoJobQueryList) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPrestoJobQueryList(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPrestoJobQueryListSlice flattens the contents of WorkflowTemplateJobsPrestoJobQueryList from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPrestoJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPrestoJobQueryList { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPrestoJobQueryList{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPrestoJobQueryList{} + } + + items := make([]WorkflowTemplateJobsPrestoJobQueryList, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPrestoJobQueryList(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPrestoJobQueryList expands an instance of WorkflowTemplateJobsPrestoJobQueryList into a JSON +// request object. +func expandWorkflowTemplateJobsPrestoJobQueryList(c *Client, f *WorkflowTemplateJobsPrestoJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Queries; v != nil { + m["queries"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPrestoJobQueryList flattens an instance of WorkflowTemplateJobsPrestoJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsPrestoJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPrestoJobQueryList { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPrestoJobQueryList{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPrestoJobQueryList + } + r.Queries = dcl.FlattenStringSlice(m["queries"]) + + return r +} + +// expandWorkflowTemplateJobsPrestoJobLoggingConfigMap expands the contents of WorkflowTemplateJobsPrestoJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsPrestoJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsPrestoJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPrestoJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPrestoJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsPrestoJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsPrestoJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsPrestoJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPrestoJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPrestoJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsPrestoJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPrestoJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPrestoJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsPrestoJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPrestoJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsPrestoJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsPrestoJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPrestoJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsPrestoJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPrestoJobLoggingConfig expands an instance of WorkflowTemplateJobsPrestoJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsPrestoJobLoggingConfig(c *Client, f *WorkflowTemplateJobsPrestoJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPrestoJobLoggingConfig flattens an instance of WorkflowTemplateJobsPrestoJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPrestoJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsPrestoJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsSchedulingMap expands the contents of WorkflowTemplateJobsScheduling into a JSON +// request object. +func expandWorkflowTemplateJobsSchedulingMap(c *Client, f map[string]WorkflowTemplateJobsScheduling, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsScheduling(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSchedulingSlice expands the contents of WorkflowTemplateJobsScheduling into a JSON +// request object. 
+func expandWorkflowTemplateJobsSchedulingSlice(c *Client, f []WorkflowTemplateJobsScheduling, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsScheduling(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSchedulingMap flattens the contents of WorkflowTemplateJobsScheduling from a JSON +// response object. +func flattenWorkflowTemplateJobsSchedulingMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsScheduling { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsScheduling{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsScheduling{} + } + + items := make(map[string]WorkflowTemplateJobsScheduling) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsScheduling(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSchedulingSlice flattens the contents of WorkflowTemplateJobsScheduling from a JSON +// response object. +func flattenWorkflowTemplateJobsSchedulingSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsScheduling { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsScheduling{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsScheduling{} + } + + items := make([]WorkflowTemplateJobsScheduling, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsScheduling(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsScheduling expands an instance of WorkflowTemplateJobsScheduling into a JSON +// request object. 
+func expandWorkflowTemplateJobsScheduling(c *Client, f *WorkflowTemplateJobsScheduling, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MaxFailuresPerHour; !dcl.IsEmptyValueIndirect(v) { + m["maxFailuresPerHour"] = v + } + if v := f.MaxFailuresTotal; !dcl.IsEmptyValueIndirect(v) { + m["maxFailuresTotal"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsScheduling flattens an instance of WorkflowTemplateJobsScheduling from a JSON +// response object. +func flattenWorkflowTemplateJobsScheduling(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsScheduling { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsScheduling{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsScheduling + } + r.MaxFailuresPerHour = dcl.FlattenInteger(m["maxFailuresPerHour"]) + r.MaxFailuresTotal = dcl.FlattenInteger(m["maxFailuresTotal"]) + + return r +} + +// expandWorkflowTemplateParametersMap expands the contents of WorkflowTemplateParameters into a JSON +// request object. +func expandWorkflowTemplateParametersMap(c *Client, f map[string]WorkflowTemplateParameters, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateParameters(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateParametersSlice expands the contents of WorkflowTemplateParameters into a JSON +// request object. 
+func expandWorkflowTemplateParametersSlice(c *Client, f []WorkflowTemplateParameters, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateParameters(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateParametersMap flattens the contents of WorkflowTemplateParameters from a JSON +// response object. +func flattenWorkflowTemplateParametersMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParameters { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateParameters{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateParameters{} + } + + items := make(map[string]WorkflowTemplateParameters) + for k, item := range a { + items[k] = *flattenWorkflowTemplateParameters(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateParametersSlice flattens the contents of WorkflowTemplateParameters from a JSON +// response object. +func flattenWorkflowTemplateParametersSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParameters { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateParameters{} + } + + if len(a) == 0 { + return []WorkflowTemplateParameters{} + } + + items := make([]WorkflowTemplateParameters, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateParameters(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateParameters expands an instance of WorkflowTemplateParameters into a JSON +// request object. 
+func expandWorkflowTemplateParameters(c *Client, f *WorkflowTemplateParameters, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Name; !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Fields; v != nil { + m["fields"] = v + } + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + m["description"] = v + } + if v, err := expandWorkflowTemplateParametersValidation(c, f.Validation, res); err != nil { + return nil, fmt.Errorf("error expanding Validation into validation: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["validation"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateParameters flattens an instance of WorkflowTemplateParameters from a JSON +// response object. +func flattenWorkflowTemplateParameters(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParameters { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateParameters{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateParameters + } + r.Name = dcl.FlattenString(m["name"]) + r.Fields = dcl.FlattenStringSlice(m["fields"]) + r.Description = dcl.FlattenString(m["description"]) + r.Validation = flattenWorkflowTemplateParametersValidation(c, m["validation"], res) + + return r +} + +// expandWorkflowTemplateParametersValidationMap expands the contents of WorkflowTemplateParametersValidation into a JSON +// request object. 
+func expandWorkflowTemplateParametersValidationMap(c *Client, f map[string]WorkflowTemplateParametersValidation, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateParametersValidation(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateParametersValidationSlice expands the contents of WorkflowTemplateParametersValidation into a JSON +// request object. +func expandWorkflowTemplateParametersValidationSlice(c *Client, f []WorkflowTemplateParametersValidation, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateParametersValidation(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateParametersValidationMap flattens the contents of WorkflowTemplateParametersValidation from a JSON +// response object. +func flattenWorkflowTemplateParametersValidationMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParametersValidation { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateParametersValidation{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateParametersValidation{} + } + + items := make(map[string]WorkflowTemplateParametersValidation) + for k, item := range a { + items[k] = *flattenWorkflowTemplateParametersValidation(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateParametersValidationSlice flattens the contents of WorkflowTemplateParametersValidation from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidationSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParametersValidation { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateParametersValidation{} + } + + if len(a) == 0 { + return []WorkflowTemplateParametersValidation{} + } + + items := make([]WorkflowTemplateParametersValidation, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateParametersValidation(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateParametersValidation expands an instance of WorkflowTemplateParametersValidation into a JSON +// request object. +func expandWorkflowTemplateParametersValidation(c *Client, f *WorkflowTemplateParametersValidation, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandWorkflowTemplateParametersValidationRegex(c, f.Regex, res); err != nil { + return nil, fmt.Errorf("error expanding Regex into regex: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["regex"] = v + } + if v, err := expandWorkflowTemplateParametersValidationValues(c, f.Values, res); err != nil { + return nil, fmt.Errorf("error expanding Values into values: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["values"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateParametersValidation flattens an instance of WorkflowTemplateParametersValidation from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidation(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParametersValidation { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateParametersValidation{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateParametersValidation + } + r.Regex = flattenWorkflowTemplateParametersValidationRegex(c, m["regex"], res) + r.Values = flattenWorkflowTemplateParametersValidationValues(c, m["values"], res) + + return r +} + +// expandWorkflowTemplateParametersValidationRegexMap expands the contents of WorkflowTemplateParametersValidationRegex into a JSON +// request object. +func expandWorkflowTemplateParametersValidationRegexMap(c *Client, f map[string]WorkflowTemplateParametersValidationRegex, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateParametersValidationRegex(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateParametersValidationRegexSlice expands the contents of WorkflowTemplateParametersValidationRegex into a JSON +// request object. +func expandWorkflowTemplateParametersValidationRegexSlice(c *Client, f []WorkflowTemplateParametersValidationRegex, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateParametersValidationRegex(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateParametersValidationRegexMap flattens the contents of WorkflowTemplateParametersValidationRegex from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidationRegexMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParametersValidationRegex { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateParametersValidationRegex{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateParametersValidationRegex{} + } + + items := make(map[string]WorkflowTemplateParametersValidationRegex) + for k, item := range a { + items[k] = *flattenWorkflowTemplateParametersValidationRegex(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateParametersValidationRegexSlice flattens the contents of WorkflowTemplateParametersValidationRegex from a JSON +// response object. +func flattenWorkflowTemplateParametersValidationRegexSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParametersValidationRegex { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateParametersValidationRegex{} + } + + if len(a) == 0 { + return []WorkflowTemplateParametersValidationRegex{} + } + + items := make([]WorkflowTemplateParametersValidationRegex, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateParametersValidationRegex(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateParametersValidationRegex expands an instance of WorkflowTemplateParametersValidationRegex into a JSON +// request object. +func expandWorkflowTemplateParametersValidationRegex(c *Client, f *WorkflowTemplateParametersValidationRegex, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Regexes; v != nil { + m["regexes"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateParametersValidationRegex flattens an instance of WorkflowTemplateParametersValidationRegex from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidationRegex(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParametersValidationRegex { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateParametersValidationRegex{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateParametersValidationRegex + } + r.Regexes = dcl.FlattenStringSlice(m["regexes"]) + + return r +} + +// expandWorkflowTemplateParametersValidationValuesMap expands the contents of WorkflowTemplateParametersValidationValues into a JSON +// request object. +func expandWorkflowTemplateParametersValidationValuesMap(c *Client, f map[string]WorkflowTemplateParametersValidationValues, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateParametersValidationValues(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateParametersValidationValuesSlice expands the contents of WorkflowTemplateParametersValidationValues into a JSON +// request object. +func expandWorkflowTemplateParametersValidationValuesSlice(c *Client, f []WorkflowTemplateParametersValidationValues, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateParametersValidationValues(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateParametersValidationValuesMap flattens the contents of WorkflowTemplateParametersValidationValues from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidationValuesMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParametersValidationValues { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateParametersValidationValues{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateParametersValidationValues{} + } + + items := make(map[string]WorkflowTemplateParametersValidationValues) + for k, item := range a { + items[k] = *flattenWorkflowTemplateParametersValidationValues(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateParametersValidationValuesSlice flattens the contents of WorkflowTemplateParametersValidationValues from a JSON +// response object. +func flattenWorkflowTemplateParametersValidationValuesSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParametersValidationValues { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateParametersValidationValues{} + } + + if len(a) == 0 { + return []WorkflowTemplateParametersValidationValues{} + } + + items := make([]WorkflowTemplateParametersValidationValues, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateParametersValidationValues(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateParametersValidationValues expands an instance of WorkflowTemplateParametersValidationValues into a JSON +// request object. +func expandWorkflowTemplateParametersValidationValues(c *Client, f *WorkflowTemplateParametersValidationValues, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Values; v != nil { + m["values"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateParametersValidationValues flattens an instance of WorkflowTemplateParametersValidationValues from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidationValues(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParametersValidationValues { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateParametersValidationValues{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateParametersValidationValues + } + r.Values = dcl.FlattenStringSlice(m["values"]) + + return r +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{})) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum asserts that an interface is a string, and returns a +// pointer to a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the same value as that string. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s) +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the same value as that string. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s) +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(item.(interface{})) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum asserts that an interface is a string, and returns a +// pointer to a *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum with the same value as that string. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef(s) +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{})) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a +// pointer to a *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum with the same value as that string. +func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef(s) +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{})) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a +// pointer to a *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the same value as that string. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s) +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{})) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum asserts that an interface is a string, and returns a +// pointer to a *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum with the same value as that string. +func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *WorkflowTemplate) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalWorkflowTemplate(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type workflowTemplateDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp workflowTemplateApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToWorkflowTemplateDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]workflowTemplateDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []workflowTemplateDiff + // For each operation name, create a workflowTemplateDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := workflowTemplateDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToWorkflowTemplateApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToWorkflowTemplateApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (workflowTemplateApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractWorkflowTemplateFields(r *WorkflowTemplate) error { + vEncryptionConfig := r.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. + vEncryptionConfig = &WorkflowTemplateEncryptionConfig{} + } + if err := extractWorkflowTemplateEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + r.EncryptionConfig = vEncryptionConfig + } + vPlacement := r.Placement + if vPlacement == nil { + // note: explicitly not the empty object. 
+ vPlacement = &WorkflowTemplatePlacement{} + } + if err := extractWorkflowTemplatePlacementFields(r, vPlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPlacement) { + r.Placement = vPlacement + } + return nil +} +func extractWorkflowTemplateEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplateEncryptionConfig) error { + return nil +} +func extractWorkflowTemplatePlacementFields(r *WorkflowTemplate, o *WorkflowTemplatePlacement) error { + vManagedCluster := o.ManagedCluster + if vManagedCluster == nil { + // note: explicitly not the empty object. + vManagedCluster = &WorkflowTemplatePlacementManagedCluster{} + } + if err := extractWorkflowTemplatePlacementManagedClusterFields(r, vManagedCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedCluster) { + o.ManagedCluster = vManagedCluster + } + vClusterSelector := o.ClusterSelector + if vClusterSelector == nil { + // note: explicitly not the empty object. + vClusterSelector = &WorkflowTemplatePlacementClusterSelector{} + } + if err := extractWorkflowTemplatePlacementClusterSelectorFields(r, vClusterSelector); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vClusterSelector) { + o.ClusterSelector = vClusterSelector + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedCluster) error { + vConfig := o.Config + if vConfig == nil { + // note: explicitly not the empty object. 
+ vConfig = &WorkflowTemplatePlacementManagedClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + o.Config = vConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfig) error { + vGceClusterConfig := o.GceClusterConfig + if vGceClusterConfig == nil { + // note: explicitly not the empty object. + vGceClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGceClusterConfig) { + o.GceClusterConfig = vGceClusterConfig + } + vMasterConfig := o.MasterConfig + if vMasterConfig == nil { + // note: explicitly not the empty object. + vMasterConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r, vMasterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMasterConfig) { + o.MasterConfig = vMasterConfig + } + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vSecondaryWorkerConfig := o.SecondaryWorkerConfig + if vSecondaryWorkerConfig == nil { + // note: explicitly not the empty object. 
+ vSecondaryWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { + o.SecondaryWorkerConfig = vSecondaryWorkerConfig + } + vSoftwareConfig := o.SoftwareConfig + if vSoftwareConfig == nil { + // note: explicitly not the empty object. + vSoftwareConfig = &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSoftwareConfig) { + o.SoftwareConfig = vSoftwareConfig + } + vEncryptionConfig := o.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. + vEncryptionConfig = &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + o.EncryptionConfig = vEncryptionConfig + } + vAutoscalingConfig := o.AutoscalingConfig + if vAutoscalingConfig == nil { + // note: explicitly not the empty object. + vAutoscalingConfig = &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) { + o.AutoscalingConfig = vAutoscalingConfig + } + vSecurityConfig := o.SecurityConfig + if vSecurityConfig == nil { + // note: explicitly not the empty object. 
+ vSecurityConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecurityConfig) { + o.SecurityConfig = vSecurityConfig + } + vLifecycleConfig := o.LifecycleConfig + if vLifecycleConfig == nil { + // note: explicitly not the empty object. + vLifecycleConfig = &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLifecycleConfig) { + o.LifecycleConfig = vLifecycleConfig + } + vEndpointConfig := o.EndpointConfig + if vEndpointConfig == nil { + // note: explicitly not the empty object. + vEndpointConfig = &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEndpointConfig) { + o.EndpointConfig = vEndpointConfig + } +{{- if ne $.TargetVersionName "ga" }} + vGkeClusterConfig := o.GkeClusterConfig + if vGkeClusterConfig == nil { + // note: explicitly not the empty object. + vGkeClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { + o.GkeClusterConfig = vGkeClusterConfig + } + vMetastoreConfig := o.MetastoreConfig + if vMetastoreConfig == nil { + // note: explicitly not the empty object. 
+ vMetastoreConfig = &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { + o.MetastoreConfig = vMetastoreConfig + } +{{- end }} + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) error { + vReservationAffinity := o.ReservationAffinity + if vReservationAffinity == nil { + // note: explicitly not the empty object. + vReservationAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vReservationAffinity) { + o.ReservationAffinity = vReservationAffinity + } + vNodeGroupAffinity := o.NodeGroupAffinity + if vNodeGroupAffinity == nil { + // note: explicitly not the empty object. + vNodeGroupAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) { + o.NodeGroupAffinity = vNodeGroupAffinity + } + vShieldedInstanceConfig := o.ShieldedInstanceConfig + if vShieldedInstanceConfig == nil { + // note: explicitly not the empty object. 
+ vShieldedInstanceConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { + o.ShieldedInstanceConfig = vShieldedInstanceConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigInitializationActionsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r *WorkflowTemplate, o 
*WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) error { + vKerberosConfig := o.KerberosConfig + if vKerberosConfig == nil { + // note: explicitly not the empty object. + vKerberosConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKerberosConfig) { + o.KerberosConfig = vKerberosConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) error { + vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget + if vNamespacedGkeDeploymentTarget == nil { + // note: explicitly not the empty object. 
+ vNamespacedGkeDeploymentTarget = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { + o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) error { + return nil +} +{{- end }} +func extractWorkflowTemplatePlacementClusterSelectorFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementClusterSelector) error { + return nil +} +func extractWorkflowTemplateJobsFields(r *WorkflowTemplate, o *WorkflowTemplateJobs) error { + vHadoopJob := o.HadoopJob + if vHadoopJob == nil { + // note: explicitly not the empty object. + vHadoopJob = &WorkflowTemplateJobsHadoopJob{} + } + if err := extractWorkflowTemplateJobsHadoopJobFields(r, vHadoopJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHadoopJob) { + o.HadoopJob = vHadoopJob + } + vSparkJob := o.SparkJob + if vSparkJob == nil { + // note: explicitly not the empty object. + vSparkJob = &WorkflowTemplateJobsSparkJob{} + } + if err := extractWorkflowTemplateJobsSparkJobFields(r, vSparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkJob) { + o.SparkJob = vSparkJob + } + vPysparkJob := o.PysparkJob + if vPysparkJob == nil { + // note: explicitly not the empty object. 
+ vPysparkJob = &WorkflowTemplateJobsPysparkJob{} + } + if err := extractWorkflowTemplateJobsPysparkJobFields(r, vPysparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPysparkJob) { + o.PysparkJob = vPysparkJob + } + vHiveJob := o.HiveJob + if vHiveJob == nil { + // note: explicitly not the empty object. + vHiveJob = &WorkflowTemplateJobsHiveJob{} + } + if err := extractWorkflowTemplateJobsHiveJobFields(r, vHiveJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHiveJob) { + o.HiveJob = vHiveJob + } + vPigJob := o.PigJob + if vPigJob == nil { + // note: explicitly not the empty object. + vPigJob = &WorkflowTemplateJobsPigJob{} + } + if err := extractWorkflowTemplateJobsPigJobFields(r, vPigJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPigJob) { + o.PigJob = vPigJob + } + vSparkRJob := o.SparkRJob + if vSparkRJob == nil { + // note: explicitly not the empty object. + vSparkRJob = &WorkflowTemplateJobsSparkRJob{} + } + if err := extractWorkflowTemplateJobsSparkRJobFields(r, vSparkRJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkRJob) { + o.SparkRJob = vSparkRJob + } + vSparkSqlJob := o.SparkSqlJob + if vSparkSqlJob == nil { + // note: explicitly not the empty object. + vSparkSqlJob = &WorkflowTemplateJobsSparkSqlJob{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobFields(r, vSparkSqlJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkSqlJob) { + o.SparkSqlJob = vSparkSqlJob + } + vPrestoJob := o.PrestoJob + if vPrestoJob == nil { + // note: explicitly not the empty object. + vPrestoJob = &WorkflowTemplateJobsPrestoJob{} + } + if err := extractWorkflowTemplateJobsPrestoJobFields(r, vPrestoJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrestoJob) { + o.PrestoJob = vPrestoJob + } + vScheduling := o.Scheduling + if vScheduling == nil { + // note: explicitly not the empty object. 
+ vScheduling = &WorkflowTemplateJobsScheduling{} + } + if err := extractWorkflowTemplateJobsSchedulingFields(r, vScheduling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vScheduling) { + o.Scheduling = vScheduling + } + return nil +} +func extractWorkflowTemplateJobsHadoopJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsSparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsPysparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsHiveJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsHiveJobQueryList{} + } + if err := extractWorkflowTemplateJobsHiveJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + return nil +} +func extractWorkflowTemplateJobsHiveJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsPigJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPigJobQueryList{} + } + if err := extractWorkflowTemplateJobsPigJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPigJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPigJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsPigJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsPigJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSparkRJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSparkSqlJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsSparkSqlJobQueryList{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsSparkSqlJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsPrestoJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPrestoJobQueryList{} + } + if err := extractWorkflowTemplateJobsPrestoJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsPrestoJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSchedulingFields(r *WorkflowTemplate, o *WorkflowTemplateJobsScheduling) error { + return nil +} +func extractWorkflowTemplateParametersFields(r *WorkflowTemplate, o *WorkflowTemplateParameters) error { + vValidation := o.Validation + if vValidation == nil { + // note: explicitly not the empty object. + vValidation = &WorkflowTemplateParametersValidation{} + } + if err := extractWorkflowTemplateParametersValidationFields(r, vValidation); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValidation) { + o.Validation = vValidation + } + return nil +} +func extractWorkflowTemplateParametersValidationFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidation) error { + vRegex := o.Regex + if vRegex == nil { + // note: explicitly not the empty object. + vRegex = &WorkflowTemplateParametersValidationRegex{} + } + if err := extractWorkflowTemplateParametersValidationRegexFields(r, vRegex); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRegex) { + o.Regex = vRegex + } + vValues := o.Values + if vValues == nil { + // note: explicitly not the empty object. 
+ vValues = &WorkflowTemplateParametersValidationValues{} + } + if err := extractWorkflowTemplateParametersValidationValuesFields(r, vValues); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValues) { + o.Values = vValues + } + return nil +} +func extractWorkflowTemplateParametersValidationRegexFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationRegex) error { + return nil +} +func extractWorkflowTemplateParametersValidationValuesFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationValues) error { + return nil +} + +func postReadExtractWorkflowTemplateFields(r *WorkflowTemplate) error { + vEncryptionConfig := r.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. + vEncryptionConfig = &WorkflowTemplateEncryptionConfig{} + } + if err := postReadExtractWorkflowTemplateEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + r.EncryptionConfig = vEncryptionConfig + } + vPlacement := r.Placement + if vPlacement == nil { + // note: explicitly not the empty object. + vPlacement = &WorkflowTemplatePlacement{} + } + if err := postReadExtractWorkflowTemplatePlacementFields(r, vPlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPlacement) { + r.Placement = vPlacement + } + return nil +} +func postReadExtractWorkflowTemplateEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplateEncryptionConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementFields(r *WorkflowTemplate, o *WorkflowTemplatePlacement) error { + vManagedCluster := o.ManagedCluster + if vManagedCluster == nil { + // note: explicitly not the empty object. 
+ vManagedCluster = &WorkflowTemplatePlacementManagedCluster{} + } + if err := extractWorkflowTemplatePlacementManagedClusterFields(r, vManagedCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedCluster) { + o.ManagedCluster = vManagedCluster + } + vClusterSelector := o.ClusterSelector + if vClusterSelector == nil { + // note: explicitly not the empty object. + vClusterSelector = &WorkflowTemplatePlacementClusterSelector{} + } + if err := extractWorkflowTemplatePlacementClusterSelectorFields(r, vClusterSelector); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vClusterSelector) { + o.ClusterSelector = vClusterSelector + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedCluster) error { + vConfig := o.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &WorkflowTemplatePlacementManagedClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + o.Config = vConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfig) error { + vGceClusterConfig := o.GceClusterConfig + if vGceClusterConfig == nil { + // note: explicitly not the empty object. + vGceClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGceClusterConfig) { + o.GceClusterConfig = vGceClusterConfig + } + vMasterConfig := o.MasterConfig + if vMasterConfig == nil { + // note: explicitly not the empty object. 
+ vMasterConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r, vMasterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMasterConfig) { + o.MasterConfig = vMasterConfig + } + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vSecondaryWorkerConfig := o.SecondaryWorkerConfig + if vSecondaryWorkerConfig == nil { + // note: explicitly not the empty object. + vSecondaryWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { + o.SecondaryWorkerConfig = vSecondaryWorkerConfig + } + vSoftwareConfig := o.SoftwareConfig + if vSoftwareConfig == nil { + // note: explicitly not the empty object. + vSoftwareConfig = &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSoftwareConfig) { + o.SoftwareConfig = vSoftwareConfig + } + vEncryptionConfig := o.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. 
+ vEncryptionConfig = &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + o.EncryptionConfig = vEncryptionConfig + } + vAutoscalingConfig := o.AutoscalingConfig + if vAutoscalingConfig == nil { + // note: explicitly not the empty object. + vAutoscalingConfig = &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) { + o.AutoscalingConfig = vAutoscalingConfig + } + vSecurityConfig := o.SecurityConfig + if vSecurityConfig == nil { + // note: explicitly not the empty object. + vSecurityConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecurityConfig) { + o.SecurityConfig = vSecurityConfig + } + vLifecycleConfig := o.LifecycleConfig + if vLifecycleConfig == nil { + // note: explicitly not the empty object. + vLifecycleConfig = &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLifecycleConfig) { + o.LifecycleConfig = vLifecycleConfig + } + vEndpointConfig := o.EndpointConfig + if vEndpointConfig == nil { + // note: explicitly not the empty object. 
+ vEndpointConfig = &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEndpointConfig) { + o.EndpointConfig = vEndpointConfig + } +{{- if ne $.TargetVersionName "ga" }} + vGkeClusterConfig := o.GkeClusterConfig + if vGkeClusterConfig == nil { + // note: explicitly not the empty object. + vGkeClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { + o.GkeClusterConfig = vGkeClusterConfig + } + vMetastoreConfig := o.MetastoreConfig + if vMetastoreConfig == nil { + // note: explicitly not the empty object. + vMetastoreConfig = &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { + o.MetastoreConfig = vMetastoreConfig + } +{{- end }} + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) error { + vReservationAffinity := o.ReservationAffinity + if vReservationAffinity == nil { + // note: explicitly not the empty object. 
+ vReservationAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vReservationAffinity) { + o.ReservationAffinity = vReservationAffinity + } + vNodeGroupAffinity := o.NodeGroupAffinity + if vNodeGroupAffinity == nil { + // note: explicitly not the empty object. + vNodeGroupAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) { + o.NodeGroupAffinity = vNodeGroupAffinity + } + vShieldedInstanceConfig := o.ShieldedInstanceConfig + if vShieldedInstanceConfig == nil { + // note: explicitly not the empty object. 
+ vShieldedInstanceConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { + o.ShieldedInstanceConfig = vShieldedInstanceConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigInitializationActionsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) error { + return nil +} +func 
postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) error { + vKerberosConfig := o.KerberosConfig + if vKerberosConfig == nil { + // note: explicitly not the empty object. + vKerberosConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKerberosConfig) { + o.KerberosConfig = vKerberosConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) error { + vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget + if vNamespacedGkeDeploymentTarget == nil { + // note: explicitly not the empty object. 
+ vNamespacedGkeDeploymentTarget = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { + o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) error { + return nil +} +{{- end }} +func postReadExtractWorkflowTemplatePlacementClusterSelectorFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementClusterSelector) error { + return nil +} +func postReadExtractWorkflowTemplateJobsFields(r *WorkflowTemplate, o *WorkflowTemplateJobs) error { + vHadoopJob := o.HadoopJob + if vHadoopJob == nil { + // note: explicitly not the empty object. + vHadoopJob = &WorkflowTemplateJobsHadoopJob{} + } + if err := extractWorkflowTemplateJobsHadoopJobFields(r, vHadoopJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHadoopJob) { + o.HadoopJob = vHadoopJob + } + vSparkJob := o.SparkJob + if vSparkJob == nil { + // note: explicitly not the empty object. + vSparkJob = &WorkflowTemplateJobsSparkJob{} + } + if err := extractWorkflowTemplateJobsSparkJobFields(r, vSparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkJob) { + o.SparkJob = vSparkJob + } + vPysparkJob := o.PysparkJob + if vPysparkJob == nil { + // note: explicitly not the empty object. 
+ vPysparkJob = &WorkflowTemplateJobsPysparkJob{} + } + if err := extractWorkflowTemplateJobsPysparkJobFields(r, vPysparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPysparkJob) { + o.PysparkJob = vPysparkJob + } + vHiveJob := o.HiveJob + if vHiveJob == nil { + // note: explicitly not the empty object. + vHiveJob = &WorkflowTemplateJobsHiveJob{} + } + if err := extractWorkflowTemplateJobsHiveJobFields(r, vHiveJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHiveJob) { + o.HiveJob = vHiveJob + } + vPigJob := o.PigJob + if vPigJob == nil { + // note: explicitly not the empty object. + vPigJob = &WorkflowTemplateJobsPigJob{} + } + if err := extractWorkflowTemplateJobsPigJobFields(r, vPigJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPigJob) { + o.PigJob = vPigJob + } + vSparkRJob := o.SparkRJob + if vSparkRJob == nil { + // note: explicitly not the empty object. + vSparkRJob = &WorkflowTemplateJobsSparkRJob{} + } + if err := extractWorkflowTemplateJobsSparkRJobFields(r, vSparkRJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkRJob) { + o.SparkRJob = vSparkRJob + } + vSparkSqlJob := o.SparkSqlJob + if vSparkSqlJob == nil { + // note: explicitly not the empty object. + vSparkSqlJob = &WorkflowTemplateJobsSparkSqlJob{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobFields(r, vSparkSqlJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkSqlJob) { + o.SparkSqlJob = vSparkSqlJob + } + vPrestoJob := o.PrestoJob + if vPrestoJob == nil { + // note: explicitly not the empty object. + vPrestoJob = &WorkflowTemplateJobsPrestoJob{} + } + if err := extractWorkflowTemplateJobsPrestoJobFields(r, vPrestoJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrestoJob) { + o.PrestoJob = vPrestoJob + } + vScheduling := o.Scheduling + if vScheduling == nil { + // note: explicitly not the empty object. 
+ vScheduling = &WorkflowTemplateJobsScheduling{} + } + if err := extractWorkflowTemplateJobsSchedulingFields(r, vScheduling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vScheduling) { + o.Scheduling = vScheduling + } + return nil +} +func postReadExtractWorkflowTemplateJobsHadoopJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsSparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPysparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsHiveJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsHiveJobQueryList{} + } + if err := extractWorkflowTemplateJobsHiveJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + return nil +} +func postReadExtractWorkflowTemplateJobsHiveJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPigJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPigJobQueryList{} + } + if err := extractWorkflowTemplateJobsPigJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPigJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPigJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsPigJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPigJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkRJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkSqlJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsSparkSqlJobQueryList{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsSparkSqlJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPrestoJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPrestoJobQueryList{} + } + if err := extractWorkflowTemplateJobsPrestoJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsPrestoJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSchedulingFields(r *WorkflowTemplate, o *WorkflowTemplateJobsScheduling) error { + return nil +} +func postReadExtractWorkflowTemplateParametersFields(r *WorkflowTemplate, o *WorkflowTemplateParameters) error { + vValidation := o.Validation + if vValidation == nil { + // note: explicitly not the empty object. + vValidation = &WorkflowTemplateParametersValidation{} + } + if err := extractWorkflowTemplateParametersValidationFields(r, vValidation); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValidation) { + o.Validation = vValidation + } + return nil +} +func postReadExtractWorkflowTemplateParametersValidationFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidation) error { + vRegex := o.Regex + if vRegex == nil { + // note: explicitly not the empty object. + vRegex = &WorkflowTemplateParametersValidationRegex{} + } + if err := extractWorkflowTemplateParametersValidationRegexFields(r, vRegex); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRegex) { + o.Regex = vRegex + } + vValues := o.Values + if vValues == nil { + // note: explicitly not the empty object. 
+ vValues = &WorkflowTemplateParametersValidationValues{} + } + if err := extractWorkflowTemplateParametersValidationValuesFields(r, vValues); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValues) { + o.Values = vValues + } + return nil +} +func postReadExtractWorkflowTemplateParametersValidationRegexFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationRegex) error { + return nil +} +func postReadExtractWorkflowTemplateParametersValidationValuesFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationValues) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/client.go b/mmv1/third_party/terraform/services/firebaserules/client.go new file mode 100644 index 000000000000..ef1efc0c177f --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/client.go @@ -0,0 +1,18 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/firebaserules/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/firebaserules/provider_dcl_client_creation.go new file mode 100644 index 000000000000..13ffd583df1b --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLFirebaserulesClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.FirebaserulesBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl new file mode 100644 index 000000000000..eda3172fc83d --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl @@ -0,0 +1,360 @@ +package firebaserules + +import ( + "context" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Release struct { + Name *string `json:"name"` + RulesetName *string `json:"rulesetName"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Disabled *bool `json:"disabled"` + Project *string `json:"project"` +} + +func (r *Release) String() string { + return dcl.SprintResource(r) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Release) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "firebaserules", + Type: "Release", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "firebaserules", +{{- end }} + } +} + +func (r *Release) ID() (string, error) { + if err := extractReleaseFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "ruleset_name": dcl.ValueOrEmptyString(nr.RulesetName), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "disabled": dcl.ValueOrEmptyString(nr.Disabled), + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/releases/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ReleaseMaxPage = -1 + +type ReleaseList struct { + Items []*Release + + nextToken string + + pageSize int32 + + resource *Release +} + +func (l *ReleaseList) HasNext() bool { 
+ return l.nextToken != "" +} + +func (l *ReleaseList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listRelease(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListRelease(ctx context.Context, project string) (*ReleaseList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListReleaseWithMaxResults(ctx, project, ReleaseMaxPage) + +} + +func (c *Client) ListReleaseWithMaxResults(ctx context.Context, project string, pageSize int32) (*ReleaseList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Release{ + Project: &project, + } + items, token, err := c.listRelease(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ReleaseList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetRelease(ctx context.Context, r *Release) (*Release, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractReleaseFields(r) + + b, err := c.getReleaseRaw(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalRelease(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeReleaseNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractReleaseFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteRelease(ctx context.Context, r *Release) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Release resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Release...") + deleteOp := deleteReleaseOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllRelease deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllRelease(ctx context.Context, project string, filter func(*Release) bool) error { + listObj, err := c.ListRelease(ctx, project) + if err != nil { + return err + } + + err = c.deleteAllRelease(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllRelease(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyRelease(ctx context.Context, rawDesired *Release, opts ...dcl.ApplyOption) (*Release, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Release + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyReleaseHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyReleaseHelper(c *Client, ctx context.Context, rawDesired *Release, opts ...dcl.ApplyOption) (*Release, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyRelease...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractReleaseFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.releaseDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToReleaseDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []releaseApiOperation + if create { + ops = append(ops, &createReleaseOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyReleaseDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyReleaseDiff(c *Client, ctx context.Context, desired *Release, rawDesired *Release, ops []releaseApiOperation, opts ...dcl.ApplyOption) (*Release, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetRelease(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createReleaseOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapRelease(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeReleaseNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeReleaseNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeReleaseDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractReleaseFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractReleaseFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffRelease(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/release_internal.go b/mmv1/third_party/terraform/services/firebaserules/release_internal.go new file mode 100644 index 000000000000..fa75570172f2 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/release_internal.go @@ -0,0 +1,614 @@ +package firebaserules + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *Release) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "rulesetName"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + return nil +} +func (r *Release) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://firebaserules.googleapis.com/v1/", params) +} + +func (r *Release) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/releases/{{name}}", 
nr.basePath(), userBasePath, params), nil +} + +func (r *Release) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{project}}/releases", nr.basePath(), userBasePath, params), nil + +} + +func (r *Release) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{project}}/releases", nr.basePath(), userBasePath, params), nil + +} + +func (r *Release) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/releases/{{name}}", nr.basePath(), userBasePath, params), nil +} + +// releaseApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type releaseApiOperation interface { + do(context.Context, *Release, *Client) error +} + +func (c *Client) listReleaseRaw(ctx context.Context, r *Release, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ReleaseMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listReleaseOperation struct { + Releases []map[string]interface{} `json:"releases"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listRelease(ctx context.Context, r *Release, pageToken string, pageSize int32) ([]*Release, string, error) { + b, err := c.listReleaseRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listReleaseOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Release + for _, v := range m.Releases { + res, err := unmarshalMapRelease(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllRelease(ctx context.Context, f func(*Release) bool, resources []*Release) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteRelease(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteReleaseOperation struct{} + +func (op *deleteReleaseOperation) do(ctx context.Context, r *Release, c *Client) error { + r, err := c.GetRelease(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.InfoWithContextf(ctx, "Release not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetRelease checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Release: %w", err) + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. 
+type createReleaseOperation struct { + response map[string]interface{} +} + +func (op *createReleaseOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createReleaseOperation) do(ctx context.Context, r *Release, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + if _, err := c.GetRelease(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getReleaseRaw(ctx context.Context, r *Release) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) releaseDiffsForRawDesired(ctx context.Context, rawDesired *Release, opts ...dcl.ApplyOption) (initial, desired *Release, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Release + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Release); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Release, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetRelease(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Release resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Release resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Release resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeReleaseDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Release: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Release: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractReleaseFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeReleaseInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Release: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeReleaseDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Release: %v", desired) + + // 2.1: Comparison of initial and desired state. 
+ diffs, err = diffRelease(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeReleaseInitialState(rawInitial, rawDesired *Release) (*Release, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeReleaseDesiredState(rawDesired, rawInitial *Release, opts ...dcl.ApplyOption) (*Release, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + + return rawDesired, nil + } + canonicalDesired := &Release{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.PartialSelfLinkToSelfLink(rawDesired.RulesetName, rawInitial.RulesetName) { + canonicalDesired.RulesetName = rawInitial.RulesetName + } else { + canonicalDesired.RulesetName = rawDesired.RulesetName + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + return canonicalDesired, nil +} + +func canonicalizeReleaseNewState(c *Client, rawNew, rawDesired *Release) (*Release, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.RulesetName) && dcl.IsEmptyValueIndirect(rawDesired.RulesetName) { + rawNew.RulesetName = rawDesired.RulesetName + } else { + if 
dcl.PartialSelfLinkToSelfLink(rawDesired.RulesetName, rawNew.RulesetName) { + rawNew.RulesetName = rawDesired.RulesetName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Disabled) && dcl.IsEmptyValueIndirect(rawDesired.Disabled) { + rawNew.Disabled = rawDesired.Disabled + } else { + if dcl.BoolCanonicalize(rawDesired.Disabled, rawNew.Disabled) { + rawNew.Disabled = rawDesired.Disabled + } + } + + rawNew.Project = rawDesired.Project + + return rawNew, nil +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffRelease(c *Client, desired, actual *Release, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. 
+ if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.RulesetName, actual.RulesetName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RulesetName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Disabled, actual.Disabled, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Disabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Release) urlNormalized() *Release { + normalized := dcl.Copy(*r).(Release) + normalized.Name = r.Name + normalized.RulesetName = dcl.SelfLinkToName(r.RulesetName) + normalized.Project = dcl.SelfLinkToName(r.Project) + return &normalized +} + +func (r *Release) updateURL(userBasePath, updateName string) (string, error) { + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Release resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Release) marshal(c *Client) ([]byte, error) { + m, err := expandRelease(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Release: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalRelease decodes JSON responses into the Release resource schema. +func unmarshalRelease(b []byte, c *Client, res *Release) (*Release, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapRelease(m, c, res) +} + +func unmarshalMapRelease(m map[string]interface{}, c *Client, res *Release) (*Release, error) { + + flattened := flattenRelease(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandRelease expands Release into a JSON request object. 
+func expandRelease(c *Client, f *Release) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveFromPattern("projects/%s/releases/%s", f.Name, dcl.SelfLinkToName(f.Project), f.Name); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v, err := dcl.DeriveField("projects/%s/rulesets/%s", f.RulesetName, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.RulesetName)); err != nil { + return nil, fmt.Errorf("error expanding RulesetName into rulesetName: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rulesetName"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenRelease flattens Release from a JSON request object into the +// Release type. +func flattenRelease(c *Client, i interface{}, res *Release) *Release { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Release{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.RulesetName = dcl.FlattenString(m["rulesetName"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Disabled = dcl.FlattenBool(m["disabled"]) + resultRes.Project = dcl.FlattenString(m["project"]) + + return resultRes +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Release) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalRelease(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type releaseDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp releaseApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToReleaseDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]releaseDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []releaseDiff + // For each operation name, create a releaseDiff which contains the operation. 
+ for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := releaseDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToReleaseApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToReleaseApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (releaseApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractReleaseFields(r *Release) error { + return nil +} + +func postReadExtractReleaseFields(r *Release) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/release_utils.go b/mmv1/third_party/terraform/services/firebaserules/release_utils.go new file mode 100644 index 000000000000..3e85646f1d5d --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/release_utils.go @@ -0,0 +1,13 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// EncodeReleaseUpdateRequest encapsulates fields in a release {} block, as expected +// by https://firebase.google.com/docs/reference/rules/rest/v1/projects.releases/patch +func EncodeReleaseUpdateRequest(m map[string]interface{}) map[string]interface{} { + req := make(map[string]interface{}) + dcl.PutMapEntry(req, []string{"release"}, m) + return req +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release.go new file mode 100644 index 000000000000..2da190a1def1 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release.go @@ -0,0 +1,245 @@ 
+package firebaserules + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFirebaserulesRelease() *schema.Resource { + return &schema.Resource{ + Create: resourceFirebaserulesReleaseCreate, + Read: resourceFirebaserulesReleaseRead, + Delete: resourceFirebaserulesReleaseDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirebaserulesReleaseImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Format: `projects/{project_id}/releases/{release_id}`\\Firestore Rules Releases will **always** have the name 'cloud.firestore'", + }, + + "ruleset_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist for the `Release` to be created.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Time the release was created.", + }, + + "disabled": { + Type: schema.TypeBool, + Computed: true, + Description: "Disable the release to keep it from being served. The response code of NOT_FOUND will be given for executables generated from this Release.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time the release was updated.", + }, + }, + } +} + +func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Release{ + Name: dcl.String(d.Get("name").(string)), + RulesetName: dcl.String(d.Get("ruleset_name").(string)), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyRelease(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Release: %s", err) + } + + log.Printf("[DEBUG] Finished creating Release %q: %#v", d.Id(), res) + + return resourceFirebaserulesReleaseRead(d, meta) +} + +func resourceFirebaserulesReleaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Release{ + Name: dcl.String(d.Get("name").(string)), + RulesetName: dcl.String(d.Get("ruleset_name").(string)), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetRelease(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("FirebaserulesRelease %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("ruleset_name", res.RulesetName); err != nil { + return fmt.Errorf("error setting ruleset_name in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if 
err = d.Set("create_time", res.CreateTime); err != nil {
+		return fmt.Errorf("error setting create_time in state: %s", err)
+	}
+	if err = d.Set("disabled", res.Disabled); err != nil {
+		return fmt.Errorf("error setting disabled in state: %s", err)
+	}
+	if err = d.Set("update_time", res.UpdateTime); err != nil {
+		return fmt.Errorf("error setting update_time in state: %s", err)
+	}
+
+	return nil
+}
+
+func resourceFirebaserulesReleaseDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	obj := &Release{
+		Name:        dcl.String(d.Get("name").(string)),
+		RulesetName: dcl.String(d.Get("ruleset_name").(string)),
+		Project:     dcl.String(project),
+	}
+
+	log.Printf("[DEBUG] Deleting Release %q", d.Id())
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+	billingProject := project
+	// err == nil indicates that the billing_project value was found
+	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+	client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete))
+	if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil {
+		d.SetId("")
+		return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err)
+	} else {
+		client.Config.BasePath = bp
+	}
+	if err := client.DeleteRelease(context.Background(), obj); err != nil {
+		return fmt.Errorf("Error deleting Release: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished deleting Release %q", d.Id())
+	return nil
+}
+
+func resourceFirebaserulesReleaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	if err := tpgresource.ParseImportId([]string{
+		"projects/(?P<project>.+)/releases/(?P<name>.+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace 
import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/releases/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl new file mode 100644 index 000000000000..b392feaaf5d2 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl @@ -0,0 +1,177 @@ +package firebaserules_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccFirebaserulesRelease_FirestoreReleaseAdditionalHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckFirebaserulesReleaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaserulesRelease_FirestoreReleaseAdditionalHandWritten(context), + }, + { + ResourceName: "google_firebaserules_release.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func 
TestAccFirebaserulesRelease_StorageReleaseHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaserulesReleaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaserulesRelease_StorageReleaseHandWritten(context), + }, + { + ResourceName: "google_firebaserules_release.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testAccFirebaserulesRelease_FirestoreReleaseAdditionalHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebaserules_release" "primary" { + name = "cloud.firestore/tf-test-database%{random_suffix}" + project = "%{project_name}" + ruleset_name = "projects/%{project_name}/rulesets/${google_firebaserules_ruleset.firestore.name}" +} + +resource "google_firebaserules_ruleset" "firestore" { + project = "%{project_name}" + + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } +{{- if ne $.TargetVersionName "ga" }} +} + +`, context) +} + +func testAccFirebaserulesRelease_StorageReleaseHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebaserules_release" "primary" { + provider = google-beta + name = "firebase.storage/${google_storage_bucket.bucket.name}" + ruleset_name = "projects/%{project_name}/rulesets/${google_firebaserules_ruleset.storage.name}" + project = "%{project_name}" + + lifecycle { + replace_triggered_by = [ + google_firebaserules_ruleset.storage + ] + } +} + +# Provision a 
non-default Cloud Storage bucket.
+resource "google_storage_bucket" "bucket" {
+  provider = google-beta
+  project  = "%{project_name}"
+  name     = "tf-test-bucket%{random_suffix}"
+  location = "%{region}"
+}
+
+# Make the Storage bucket accessible for Firebase SDKs, authentication, and Firebase Security Rules.
+resource "google_firebase_storage_bucket" "bucket" {
+  provider  = google-beta
+  project   = "%{project_name}"
+  bucket_id = google_storage_bucket.bucket.name
+}
+
+# Create a ruleset of Firebase Security Rules from a local file.
+resource "google_firebaserules_ruleset" "storage" {
+  provider = google-beta
+  project  = "%{project_name}"
+  source {
+    files {
+      name    = "storage.rules"
+      content = "service firebase.storage {match /b/{bucket}/o {match /{allPaths=**} {allow read, write: if request.auth != null;}}}"
+    }
+  }
+
+  depends_on = [
+    google_firebase_storage_bucket.bucket
+  ]
+{{- end }}
+}
+
+`, context)
+}
+
+func testAccCheckFirebaserulesReleaseDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			if rs.Type != "google_firebaserules_release" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+				billingProject = config.BillingProject
+			}
+
+			obj := &firebaserules.Release{
+				Name:        dcl.String(rs.Primary.Attributes["name"]),
+				RulesetName: dcl.String(rs.Primary.Attributes["ruleset_name"]),
+				Project:     dcl.StringOrNil(rs.Primary.Attributes["project"]),
+				CreateTime:  dcl.StringOrNil(rs.Primary.Attributes["create_time"]),
+				Disabled:    dcl.Bool(rs.Primary.Attributes["disabled"] == "true"),
+				UpdateTime:  dcl.StringOrNil(rs.Primary.Attributes["update_time"]),
+			}
+
+			client := firebaserules.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0)
+			_, err := client.GetRelease(context.Background(), obj)
+			if err == nil {
+				return 
fmt.Errorf("google_firebaserules_release still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml index 0f6fb0fda504..5caf910a758c 100644 --- a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_firebaserules_release' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'firebaserules.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Release' diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_sweeper.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_sweeper.go new file mode 100644 index 000000000000..a8c61a3ece0a --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_sweeper.go @@ -0,0 +1,53 @@ +package firebaserules + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("FirebaserulesRelease", testSweepFirebaserulesRelease) +} + +func testSweepFirebaserulesRelease(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for FirebaserulesRelease") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be 
used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLFirebaserulesClient(config, config.UserAgent, "", 0) + err = client.DeleteAllRelease(context.Background(), d["project"], isDeletableFirebaserulesRelease) + if err != nil { + return err + } + return nil +} + +func isDeletableFirebaserulesRelease(r *Release) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset.go new file mode 100644 index 000000000000..2059d81bca50 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset.go @@ -0,0 +1,409 @@ +package firebaserules + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFirebaserulesRuleset() *schema.Resource { + return &schema.Resource{ + Create: resourceFirebaserulesRulesetCreate, + Read: resourceFirebaserulesRulesetRead, + Delete: resourceFirebaserulesRulesetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirebaserulesRulesetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + 
Required: true, + ForceNew: true, + Description: "`Source` for the `Ruleset`.", + MaxItems: 1, + Elem: FirebaserulesRulesetSourceSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time the `Ruleset` was created.", + }, + + "metadata": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The metadata for this ruleset.", + Elem: FirebaserulesRulesetMetadataSchema(), + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Name of the `Ruleset`. The ruleset_id is auto generated by the service. Format: `projects/{project_id}/rulesets/{ruleset_id}`", + }, + }, + } +} + +func FirebaserulesRulesetSourceSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "files": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "`File` set constituting the `Source` bundle.", + Elem: FirebaserulesRulesetSourceFilesSchema(), + }, + + "language": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "`Language` of the `Source` bundle. If unspecified, the language will default to `FIREBASE_RULES`. Possible values: LANGUAGE_UNSPECIFIED, FIREBASE_RULES, EVENT_FLOW_TRIGGERS", + }, + }, + } +} + +func FirebaserulesRulesetSourceFilesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Textual Content.", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "File name.", + }, + + "fingerprint": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Fingerprint (e.g. 
github sha) associated with the `File`.", + }, + }, + } +} + +func FirebaserulesRulesetMetadataSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "services": { + Type: schema.TypeList, + Computed: true, + Description: "Services that this ruleset has declarations for (e.g., \"cloud.firestore\"). There may be 0+ of these.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Ruleset{ + Source: expandFirebaserulesRulesetSource(d.Get("source")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyRuleset(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Ruleset: %s", err) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. + + id, err = res.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Ruleset %q: %#v", d.Id(), res) + + return resourceFirebaserulesRulesetRead(d, meta) +} + +func resourceFirebaserulesRulesetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Ruleset{ + Source: expandFirebaserulesRulesetSource(d.Get("source")), + Project: dcl.String(project), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetRuleset(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("FirebaserulesRuleset %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("source", flattenFirebaserulesRulesetSource(res.Source)); err 
!= nil { + return fmt.Errorf("error setting source in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("metadata", flattenFirebaserulesRulesetMetadata(res.Metadata)); err != nil { + return fmt.Errorf("error setting metadata in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + + return nil +} + +func resourceFirebaserulesRulesetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Ruleset{ + Source: expandFirebaserulesRulesetSource(d.Get("source")), + Project: dcl.String(project), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + log.Printf("[DEBUG] Deleting Ruleset %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteRuleset(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Ruleset: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Ruleset %q", d.Id()) + return nil +} + +func resourceFirebaserulesRulesetImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/rulesets/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandFirebaserulesRulesetSource(o interface{}) *RulesetSource { + if o == nil { + return EmptyRulesetSource + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyRulesetSource + } + obj := objArr[0].(map[string]interface{}) + return &RulesetSource{ + Files: expandFirebaserulesRulesetSourceFilesArray(obj["files"]), + Language: RulesetSourceLanguageEnumRef(obj["language"].(string)), + } +} + +func flattenFirebaserulesRulesetSource(obj *RulesetSource) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "files": flattenFirebaserulesRulesetSourceFilesArray(obj.Files), + "language": obj.Language, + } + + return []interface{}{transformed} + +} +func expandFirebaserulesRulesetSourceFilesArray(o interface{}) []RulesetSourceFiles { + if o == nil { + return make([]RulesetSourceFiles, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]RulesetSourceFiles, 0) + } + + items := make([]RulesetSourceFiles, 0, len(objs)) + for _, item := range objs { + i := expandFirebaserulesRulesetSourceFiles(item) + items = append(items, *i) + } + + return items +} + +func expandFirebaserulesRulesetSourceFiles(o interface{}) *RulesetSourceFiles { + if o == nil { + return EmptyRulesetSourceFiles + } + + obj := o.(map[string]interface{}) + return &RulesetSourceFiles{ + Content: dcl.String(obj["content"].(string)), + Name:
dcl.String(obj["name"].(string)), + Fingerprint: dcl.String(obj["fingerprint"].(string)), + } +} + +func flattenFirebaserulesRulesetSourceFilesArray(objs []RulesetSourceFiles) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenFirebaserulesRulesetSourceFiles(&item) + items = append(items, i) + } + + return items +} + +func flattenFirebaserulesRulesetSourceFiles(obj *RulesetSourceFiles) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "content": obj.Content, + "name": obj.Name, + "fingerprint": obj.Fingerprint, + } + + return transformed + +} + +func flattenFirebaserulesRulesetMetadata(obj *RulesetMetadata) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "services": obj.Services, + } + + return []interface{}{transformed} + +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go new file mode 100644 index 000000000000..76eee6c75919 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go @@ -0,0 +1,134 @@ +package firebaserules_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccFirebaserulesRuleset_BasicRuleset(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": 
envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckFirebaserulesRulesetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaserulesRuleset_BasicRuleset(context), + }, + { + ResourceName: "google_firebaserules_ruleset.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccFirebaserulesRuleset_MinimalRuleset(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckFirebaserulesRulesetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaserulesRuleset_MinimalRuleset(context), + }, + { + ResourceName: "google_firebaserules_ruleset.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccFirebaserulesRuleset_BasicRuleset(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebaserules_ruleset" "primary" { + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + fingerprint = "" + } + + language = "" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccFirebaserulesRuleset_MinimalRuleset(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebaserules_ruleset" "primary" { + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name 
= "firestore.rules" + } + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckFirebaserulesRulesetDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_firebaserules_ruleset" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &firebaserules.Ruleset{ + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), + } + + client := firebaserules.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetRuleset(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_firebaserules_ruleset still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml index be3d282611ad..32dfa7a247b7 100644 --- a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_firebaserules_ruleset' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'firebaserules.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Ruleset' diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_sweeper.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_sweeper.go new file mode 100644 index 000000000000..56b32ba3f279 --- /dev/null +++
b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_sweeper.go @@ -0,0 +1,53 @@ +package firebaserules + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("FirebaserulesRuleset", testSweepFirebaserulesRuleset) +} + +func testSweepFirebaserulesRuleset(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for FirebaserulesRuleset") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLFirebaserulesClient(config, config.UserAgent, "", 0) + err = client.DeleteAllRuleset(context.Background(), d["project"], isDeletableFirebaserulesRuleset) + if err != nil { + return err + } + return nil +} + +func isDeletableFirebaserulesRuleset(r *Ruleset) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl new file mode 100644 index 000000000000..dbbd77c93409 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl @@ -0,0 +1,534 @@ +package firebaserules + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Ruleset struct { + Name *string `json:"name"` + Source *RulesetSource `json:"source"` + CreateTime *string `json:"createTime"` + Metadata *RulesetMetadata `json:"metadata"` + Project *string `json:"project"` +} + +func (r *Ruleset) String() string { + return dcl.SprintResource(r) +} + +// The enum RulesetSourceLanguageEnum. +type RulesetSourceLanguageEnum string + +// RulesetSourceLanguageEnumRef returns a *RulesetSourceLanguageEnum with the value of string s +// If the empty string is provided, nil is returned. +func RulesetSourceLanguageEnumRef(s string) *RulesetSourceLanguageEnum { + v := RulesetSourceLanguageEnum(s) + return &v +} + +func (v RulesetSourceLanguageEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"LANGUAGE_UNSPECIFIED", "FIREBASE_RULES", "EVENT_FLOW_TRIGGERS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "RulesetSourceLanguageEnum", + Value: string(v), + Valid: []string{}, + } +} + +type RulesetSource struct { + empty bool `json:"-"` + Files []RulesetSourceFiles `json:"files"` + Language *RulesetSourceLanguageEnum `json:"language"` +} + +type jsonRulesetSource RulesetSource + +func (r *RulesetSource) UnmarshalJSON(data []byte) error { + var res jsonRulesetSource + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyRulesetSource + } else { + + r.Files = res.Files + + r.Language = res.Language + + } + return nil +} + +// This object is used to assert a desired state where this RulesetSource is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyRulesetSource *RulesetSource = &RulesetSource{empty: true} + +func (r *RulesetSource) Empty() bool { + return r.empty +} + +func (r *RulesetSource) String() string { + return dcl.SprintResource(r) +} + +func (r *RulesetSource) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type RulesetSourceFiles struct { + empty bool `json:"-"` + Content *string `json:"content"` + Name *string `json:"name"` + Fingerprint *string `json:"fingerprint"` +} + +type jsonRulesetSourceFiles RulesetSourceFiles + +func (r *RulesetSourceFiles) UnmarshalJSON(data []byte) error { + var res jsonRulesetSourceFiles + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyRulesetSourceFiles + } else { + + r.Content = res.Content + + r.Name = res.Name + + r.Fingerprint = res.Fingerprint + + } + return nil +} + +// This object is used to assert a desired state where this RulesetSourceFiles is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyRulesetSourceFiles *RulesetSourceFiles = &RulesetSourceFiles{empty: true} + +func (r *RulesetSourceFiles) Empty() bool { + return r.empty +} + +func (r *RulesetSourceFiles) String() string { + return dcl.SprintResource(r) +} + +func (r *RulesetSourceFiles) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type RulesetMetadata struct { + empty bool `json:"-"` + Services []string `json:"services"` +} + +type jsonRulesetMetadata RulesetMetadata + +func (r *RulesetMetadata) UnmarshalJSON(data []byte) error { + var res jsonRulesetMetadata + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyRulesetMetadata + } else { + + r.Services = res.Services + + } + return nil +} + +// This object is used to assert a desired state where this RulesetMetadata is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyRulesetMetadata *RulesetMetadata = &RulesetMetadata{empty: true} + +func (r *RulesetMetadata) Empty() bool { + return r.empty +} + +func (r *RulesetMetadata) String() string { + return dcl.SprintResource(r) +} + +func (r *RulesetMetadata) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *Ruleset) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "firebaserules", + Type: "Ruleset", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "firebaserules", +{{- end }} + } +} + +func (r *Ruleset) ID() (string, error) { + if err := extractRulesetFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "source": dcl.ValueOrEmptyString(nr.Source), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "metadata": dcl.ValueOrEmptyString(nr.Metadata), + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const RulesetMaxPage = -1 + +type RulesetList struct { + Items []*Ruleset + + nextToken string + + pageSize int32 + + resource *Ruleset +} + +func (l *RulesetList) HasNext() bool { + return l.nextToken != "" +} + +func (l *RulesetList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listRuleset(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListRuleset(ctx context.Context, project string) (*RulesetList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListRulesetWithMaxResults(ctx, project, RulesetMaxPage) + +} + +func (c *Client) ListRulesetWithMaxResults(ctx context.Context, project string, pageSize int32) (*RulesetList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization 
methods. + r := &Ruleset{ + Project: &project, + } + items, token, err := c.listRuleset(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &RulesetList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetRuleset(ctx context.Context, r *Ruleset) (*Ruleset, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractRulesetFields(r) + + b, err := c.getRulesetRaw(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalRuleset(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeRulesetNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractRulesetFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteRuleset(ctx context.Context, r *Ruleset) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Ruleset resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Ruleset...") + deleteOp := deleteRulesetOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllRuleset deletes all resources that the 
filter functions returns true on. +func (c *Client) DeleteAllRuleset(ctx context.Context, project string, filter func(*Ruleset) bool) error { + listObj, err := c.ListRuleset(ctx, project) + if err != nil { + return err + } + + err = c.deleteAllRuleset(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllRuleset(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyRuleset(ctx context.Context, rawDesired *Ruleset, opts ...dcl.ApplyOption) (*Ruleset, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Ruleset + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyRulesetHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyRulesetHelper(c *Client, ctx context.Context, rawDesired *Ruleset, opts ...dcl.ApplyOption) (*Ruleset, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyRuleset...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractRulesetFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.rulesetDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToRulesetDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []rulesetApiOperation + if create { + ops = append(ops, &createRulesetOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyRulesetDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyRulesetDiff(c *Client, ctx context.Context, desired *Ruleset, rawDesired *Ruleset, ops []rulesetApiOperation, opts ...dcl.ApplyOption) (*Ruleset, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetRuleset(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createRulesetOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapRuleset(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeRulesetNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeRulesetNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeRulesetDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractRulesetFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractRulesetFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffRuleset(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go b/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go new file mode 100644 index 000000000000..b0807590434a --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go @@ -0,0 +1,1577 @@ +package firebaserules + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *Ruleset) validate() error { + + if err := dcl.Required(r, "source"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Source) { + if err := r.Source.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Metadata) { + if err := r.Metadata.validate(); err != nil { + return err + } + } + return nil +} +func (r *RulesetSource) validate() error { + if err := dcl.Required(r, "files"); err != nil { + return err + } + return nil +} +func (r *RulesetSourceFiles) validate() error { + if err := dcl.Required(r, "content"); err != nil { + return err + } + if err := dcl.Required(r, "name"); err != nil 
{ + return err + } + return nil +} +func (r *RulesetMetadata) validate() error { + return nil +} +func (r *Ruleset) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://firebaserules.googleapis.com/v1/", params) +} + +func (r *Ruleset) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/rulesets/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Ruleset) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{project}}/rulesets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Ruleset) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{project}}/rulesets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Ruleset) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/rulesets/{{name}}", nr.basePath(), userBasePath, params), nil +} + +// rulesetApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type rulesetApiOperation interface { + do(context.Context, *Ruleset, *Client) error +} + +func (c *Client) listRulesetRaw(ctx context.Context, r *Ruleset, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != RulesetMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listRulesetOperation struct { + Rulesets []map[string]interface{} `json:"rulesets"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listRuleset(ctx context.Context, r *Ruleset, pageToken string, pageSize int32) ([]*Ruleset, string, error) { + b, err := c.listRulesetRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listRulesetOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Ruleset + for _, v := range m.Rulesets { + res, err := unmarshalMapRuleset(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllRuleset(ctx context.Context, f func(*Ruleset) bool, resources []*Ruleset) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteRuleset(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteRulesetOperation struct{} + +func (op *deleteRulesetOperation) do(ctx context.Context, r *Ruleset, c *Client) error { + r, err := c.GetRuleset(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.InfoWithContextf(ctx, "Ruleset not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetRuleset checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Ruleset: %w", err) + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createRulesetOperation struct { + response map[string]interface{} +} + +func (op *createRulesetOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createRulesetOperation) do(ctx context.Context, r *Ruleset, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + if r.Name != nil { + // Allowing creation to continue with Name set could result in a Ruleset with the wrong Name. 
+ return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + // Include Name in URL substitution for initial GET request. + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + + if _, err := c.GetRuleset(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getRulesetRaw(ctx context.Context, r *Ruleset) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) rulesetDiffsForRawDesired(ctx context.Context, rawDesired *Ruleset, opts ...dcl.ApplyOption) (initial, desired *Ruleset, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Ruleset + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Ruleset); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Ruleset, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + if fetchState.Name == nil { + // We cannot perform a get because of lack of information. 
We have to assume + // that this is being created for the first time. + desired, err := canonicalizeRulesetDesiredState(rawDesired, nil) + return nil, desired, nil, err + } + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetRuleset(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Ruleset resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Ruleset resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Ruleset resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeRulesetDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Ruleset: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Ruleset: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractRulesetFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeRulesetInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Ruleset: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeRulesetDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Ruleset: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffRuleset(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeRulesetInitialState(rawInitial, rawDesired *Ruleset) (*Ruleset, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeRulesetDesiredState(rawDesired, rawInitial *Ruleset, opts ...dcl.ApplyOption) (*Ruleset, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Source = canonicalizeRulesetSource(rawDesired.Source, nil, opts...) + rawDesired.Metadata = canonicalizeRulesetMetadata(rawDesired.Metadata, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Ruleset{} + if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + canonicalDesired.Source = canonicalizeRulesetSource(rawDesired.Source, rawInitial.Source, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + return canonicalDesired, nil +} + +func canonicalizeRulesetNewState(c *Client, rawNew, rawDesired *Ruleset) (*Ruleset, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Source) && dcl.IsEmptyValueIndirect(rawDesired.Source) { + rawNew.Source = rawDesired.Source + } else { + rawNew.Source = canonicalizeNewRulesetSource(c, rawDesired.Source, rawNew.Source) + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Metadata) && dcl.IsEmptyValueIndirect(rawDesired.Metadata) { + rawNew.Metadata = rawDesired.Metadata + } else { + rawNew.Metadata = canonicalizeNewRulesetMetadata(c, rawDesired.Metadata, rawNew.Metadata) + } + + rawNew.Project = rawDesired.Project + + return rawNew, nil +} + +func canonicalizeRulesetSource(des, initial *RulesetSource, opts ...dcl.ApplyOption) *RulesetSource { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &RulesetSource{} + + cDes.Files = canonicalizeRulesetSourceFilesSlice(des.Files, initial.Files, opts...) + if dcl.IsZeroValue(des.Language) || (dcl.IsEmptyValueIndirect(des.Language) && dcl.IsEmptyValueIndirect(initial.Language)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Language = initial.Language + } else { + cDes.Language = des.Language + } + + return cDes +} + +func canonicalizeRulesetSourceSlice(des, initial []RulesetSource, opts ...dcl.ApplyOption) []RulesetSource { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]RulesetSource, 0, len(des)) + for _, d := range des { + cd := canonicalizeRulesetSource(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]RulesetSource, 0, len(des)) + for i, d := range des { + cd := canonicalizeRulesetSource(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewRulesetSource(c *Client, des, nw *RulesetSource) *RulesetSource { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for RulesetSource while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Files = canonicalizeNewRulesetSourceFilesSlice(c, des.Files, nw.Files) + + return nw +} + +func canonicalizeNewRulesetSourceSet(c *Client, des, nw []RulesetSource) []RulesetSource { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []RulesetSource + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareRulesetSourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewRulesetSource(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewRulesetSourceSlice(c *Client, des, nw []RulesetSource) []RulesetSource { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []RulesetSource + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewRulesetSource(c, &d, &n)) + } + + return items +} + +func canonicalizeRulesetSourceFiles(des, initial *RulesetSourceFiles, opts ...dcl.ApplyOption) *RulesetSourceFiles { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &RulesetSourceFiles{} + + if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { + cDes.Content = initial.Content + } else { + cDes.Content = des.Content + } + if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { + cDes.Name = initial.Name + } else { + cDes.Name = des.Name + } + if dcl.StringCanonicalize(des.Fingerprint, initial.Fingerprint) || dcl.IsZeroValue(des.Fingerprint) { + cDes.Fingerprint = initial.Fingerprint + } else { + cDes.Fingerprint = des.Fingerprint + } + + return cDes +} + +func canonicalizeRulesetSourceFilesSlice(des, initial []RulesetSourceFiles, opts ...dcl.ApplyOption) []RulesetSourceFiles { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]RulesetSourceFiles, 0, len(des)) + for _, d := range des { + cd := canonicalizeRulesetSourceFiles(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]RulesetSourceFiles, 0, len(des)) + for i, d := range des { + cd := canonicalizeRulesetSourceFiles(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewRulesetSourceFiles(c *Client, des, nw *RulesetSourceFiles) *RulesetSourceFiles { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for RulesetSourceFiles while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Content, nw.Content) { + nw.Content = des.Content + } + if dcl.StringCanonicalize(des.Name, nw.Name) { + nw.Name = des.Name + } + if dcl.StringCanonicalize(des.Fingerprint, nw.Fingerprint) { + nw.Fingerprint = des.Fingerprint + } + + return nw +} + +func canonicalizeNewRulesetSourceFilesSet(c *Client, des, nw []RulesetSourceFiles) []RulesetSourceFiles { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []RulesetSourceFiles + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareRulesetSourceFilesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewRulesetSourceFiles(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewRulesetSourceFilesSlice(c *Client, des, nw []RulesetSourceFiles) []RulesetSourceFiles { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []RulesetSourceFiles + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewRulesetSourceFiles(c, &d, &n)) + } + + return items +} + +func canonicalizeRulesetMetadata(des, initial *RulesetMetadata, opts ...dcl.ApplyOption) *RulesetMetadata { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &RulesetMetadata{} + + if dcl.StringArrayCanonicalize(des.Services, initial.Services) { + cDes.Services = initial.Services + } else { + cDes.Services = des.Services + } + + return cDes +} + +func canonicalizeRulesetMetadataSlice(des, initial []RulesetMetadata, opts ...dcl.ApplyOption) []RulesetMetadata { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]RulesetMetadata, 0, len(des)) + for _, d := range des { + cd := canonicalizeRulesetMetadata(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]RulesetMetadata, 0, len(des)) + for i, d := range des { + cd := canonicalizeRulesetMetadata(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewRulesetMetadata(c *Client, des, nw *RulesetMetadata) *RulesetMetadata { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for RulesetMetadata while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Services, nw.Services) { + nw.Services = des.Services + } + + return nw +} + +func canonicalizeNewRulesetMetadataSet(c *Client, des, nw []RulesetMetadata) []RulesetMetadata { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []RulesetMetadata + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareRulesetMetadataNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewRulesetMetadata(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewRulesetMetadataSlice(c *Client, des, nw []RulesetMetadata) []RulesetMetadata { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []RulesetMetadata + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewRulesetMetadata(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffRuleset(c *Client, desired, actual *Ruleset, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Source, actual.Source, dcl.DiffInfo{ObjectFunction: compareRulesetSourceNewStyle, EmptyObject: EmptyRulesetSource, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Source")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareRulesetMetadataNewStyle, EmptyObject: EmptyRulesetMetadata, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareRulesetSourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*RulesetSource) + if !ok { + desiredNotPointer, ok := d.(RulesetSource) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSource or *RulesetSource", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*RulesetSource) + if !ok { + actualNotPointer, ok := a.(RulesetSource) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSource", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Files, actual.Files, dcl.DiffInfo{ObjectFunction: compareRulesetSourceFilesNewStyle, EmptyObject: EmptyRulesetSourceFiles, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Files")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Language, actual.Language, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Language")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareRulesetSourceFilesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*RulesetSourceFiles) + if !ok { + desiredNotPointer, ok := d.(RulesetSourceFiles) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSourceFiles or *RulesetSourceFiles", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*RulesetSourceFiles) + if !ok { + actualNotPointer, ok := a.(RulesetSourceFiles) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSourceFiles", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Content")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Fingerprint, actual.Fingerprint, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fingerprint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareRulesetMetadataNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*RulesetMetadata) + if !ok { + desiredNotPointer, ok := d.(RulesetMetadata) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetMetadata or *RulesetMetadata", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*RulesetMetadata) + if !ok { + actualNotPointer, ok := a.(RulesetMetadata) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetMetadata", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Services, actual.Services, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Services")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Ruleset) urlNormalized() *Ruleset { + normalized := dcl.Copy(*r).(Ruleset) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Project = dcl.SelfLinkToName(r.Project) + return &normalized +} + +func (r *Ruleset) updateURL(userBasePath, updateName string) (string, error) { + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Ruleset resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Ruleset) marshal(c *Client) ([]byte, error) { + m, err := expandRuleset(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Ruleset: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalRuleset decodes JSON responses into the Ruleset resource schema. 
+func unmarshalRuleset(b []byte, c *Client, res *Ruleset) (*Ruleset, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapRuleset(m, c, res) +} + +func unmarshalMapRuleset(m map[string]interface{}, c *Client, res *Ruleset) (*Ruleset, error) { + + flattened := flattenRuleset(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandRuleset expands Ruleset into a JSON request object. +func expandRuleset(c *Client, f *Ruleset) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/rulesets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v, err := expandRulesetSource(c, f.Source, res); err != nil { + return nil, fmt.Errorf("error expanding Source into source: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["source"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenRuleset flattens Ruleset from a JSON request object into the +// Ruleset type. 
+func flattenRuleset(c *Client, i interface{}, res *Ruleset) *Ruleset { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Ruleset{} + resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + resultRes.Source = flattenRulesetSource(c, m["source"], res) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.Metadata = flattenRulesetMetadata(c, m["metadata"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + + return resultRes +} + +// expandRulesetSourceMap expands the contents of RulesetSource into a JSON +// request object. +func expandRulesetSourceMap(c *Client, f map[string]RulesetSource, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandRulesetSource(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandRulesetSourceSlice expands the contents of RulesetSource into a JSON +// request object. +func expandRulesetSourceSlice(c *Client, f []RulesetSource, res *Ruleset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandRulesetSource(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenRulesetSourceMap flattens the contents of RulesetSource from a JSON +// response object. 
+func flattenRulesetSourceMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetSource { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetSource{} + } + + if len(a) == 0 { + return map[string]RulesetSource{} + } + + items := make(map[string]RulesetSource) + for k, item := range a { + items[k] = *flattenRulesetSource(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenRulesetSourceSlice flattens the contents of RulesetSource from a JSON +// response object. +func flattenRulesetSourceSlice(c *Client, i interface{}, res *Ruleset) []RulesetSource { + a, ok := i.([]interface{}) + if !ok { + return []RulesetSource{} + } + + if len(a) == 0 { + return []RulesetSource{} + } + + items := make([]RulesetSource, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetSource(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandRulesetSource expands an instance of RulesetSource into a JSON +// request object. +func expandRulesetSource(c *Client, f *RulesetSource, res *Ruleset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandRulesetSourceFilesSlice(c, f.Files, res); err != nil { + return nil, fmt.Errorf("error expanding Files into files: %w", err) + } else if v != nil { + m["files"] = v + } + if v := f.Language; !dcl.IsEmptyValueIndirect(v) { + m["language"] = v + } + + return m, nil +} + +// flattenRulesetSource flattens an instance of RulesetSource from a JSON +// response object. 
+func flattenRulesetSource(c *Client, i interface{}, res *Ruleset) *RulesetSource { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &RulesetSource{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyRulesetSource + } + r.Files = flattenRulesetSourceFilesSlice(c, m["files"], res) + r.Language = flattenRulesetSourceLanguageEnum(m["language"]) + + return r +} + +// expandRulesetSourceFilesMap expands the contents of RulesetSourceFiles into a JSON +// request object. +func expandRulesetSourceFilesMap(c *Client, f map[string]RulesetSourceFiles, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandRulesetSourceFiles(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandRulesetSourceFilesSlice expands the contents of RulesetSourceFiles into a JSON +// request object. +func expandRulesetSourceFilesSlice(c *Client, f []RulesetSourceFiles, res *Ruleset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandRulesetSourceFiles(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenRulesetSourceFilesMap flattens the contents of RulesetSourceFiles from a JSON +// response object. 
+func flattenRulesetSourceFilesMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetSourceFiles { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetSourceFiles{} + } + + if len(a) == 0 { + return map[string]RulesetSourceFiles{} + } + + items := make(map[string]RulesetSourceFiles) + for k, item := range a { + items[k] = *flattenRulesetSourceFiles(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenRulesetSourceFilesSlice flattens the contents of RulesetSourceFiles from a JSON +// response object. +func flattenRulesetSourceFilesSlice(c *Client, i interface{}, res *Ruleset) []RulesetSourceFiles { + a, ok := i.([]interface{}) + if !ok { + return []RulesetSourceFiles{} + } + + if len(a) == 0 { + return []RulesetSourceFiles{} + } + + items := make([]RulesetSourceFiles, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetSourceFiles(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandRulesetSourceFiles expands an instance of RulesetSourceFiles into a JSON +// request object. +func expandRulesetSourceFiles(c *Client, f *RulesetSourceFiles, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Content; !dcl.IsEmptyValueIndirect(v) { + m["content"] = v + } + if v := f.Name; !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Fingerprint; !dcl.IsEmptyValueIndirect(v) { + m["fingerprint"] = v + } + + return m, nil +} + +// flattenRulesetSourceFiles flattens an instance of RulesetSourceFiles from a JSON +// response object. 
+func flattenRulesetSourceFiles(c *Client, i interface{}, res *Ruleset) *RulesetSourceFiles { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &RulesetSourceFiles{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyRulesetSourceFiles + } + r.Content = dcl.FlattenString(m["content"]) + r.Name = dcl.FlattenString(m["name"]) + r.Fingerprint = dcl.FlattenString(m["fingerprint"]) + + return r +} + +// expandRulesetMetadataMap expands the contents of RulesetMetadata into a JSON +// request object. +func expandRulesetMetadataMap(c *Client, f map[string]RulesetMetadata, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandRulesetMetadata(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandRulesetMetadataSlice expands the contents of RulesetMetadata into a JSON +// request object. +func expandRulesetMetadataSlice(c *Client, f []RulesetMetadata, res *Ruleset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandRulesetMetadata(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenRulesetMetadataMap flattens the contents of RulesetMetadata from a JSON +// response object. 
+func flattenRulesetMetadataMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetMetadata { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetMetadata{} + } + + if len(a) == 0 { + return map[string]RulesetMetadata{} + } + + items := make(map[string]RulesetMetadata) + for k, item := range a { + items[k] = *flattenRulesetMetadata(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenRulesetMetadataSlice flattens the contents of RulesetMetadata from a JSON +// response object. +func flattenRulesetMetadataSlice(c *Client, i interface{}, res *Ruleset) []RulesetMetadata { + a, ok := i.([]interface{}) + if !ok { + return []RulesetMetadata{} + } + + if len(a) == 0 { + return []RulesetMetadata{} + } + + items := make([]RulesetMetadata, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetMetadata(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandRulesetMetadata expands an instance of RulesetMetadata into a JSON +// request object. +func expandRulesetMetadata(c *Client, f *RulesetMetadata, res *Ruleset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Services; v != nil { + m["services"] = v + } + + return m, nil +} + +// flattenRulesetMetadata flattens an instance of RulesetMetadata from a JSON +// response object. +func flattenRulesetMetadata(c *Client, i interface{}, res *Ruleset) *RulesetMetadata { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &RulesetMetadata{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyRulesetMetadata + } + r.Services = dcl.FlattenStringSlice(m["services"]) + + return r +} + +// flattenRulesetSourceLanguageEnumMap flattens the contents of RulesetSourceLanguageEnum from a JSON +// response object. 
+func flattenRulesetSourceLanguageEnumMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetSourceLanguageEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetSourceLanguageEnum{} + } + + if len(a) == 0 { + return map[string]RulesetSourceLanguageEnum{} + } + + items := make(map[string]RulesetSourceLanguageEnum) + for k, item := range a { + items[k] = *flattenRulesetSourceLanguageEnum(item.(interface{})) + } + + return items +} + +// flattenRulesetSourceLanguageEnumSlice flattens the contents of RulesetSourceLanguageEnum from a JSON +// response object. +func flattenRulesetSourceLanguageEnumSlice(c *Client, i interface{}, res *Ruleset) []RulesetSourceLanguageEnum { + a, ok := i.([]interface{}) + if !ok { + return []RulesetSourceLanguageEnum{} + } + + if len(a) == 0 { + return []RulesetSourceLanguageEnum{} + } + + items := make([]RulesetSourceLanguageEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetSourceLanguageEnum(item.(interface{}))) + } + + return items +} + +// flattenRulesetSourceLanguageEnum asserts that an interface is a string, and returns a +// pointer to a *RulesetSourceLanguageEnum with the same value as that string. +func flattenRulesetSourceLanguageEnum(i interface{}) *RulesetSourceLanguageEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return RulesetSourceLanguageEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Ruleset) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalRuleset(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type rulesetDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp rulesetApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToRulesetDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]rulesetDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []rulesetDiff + // For each operation name, create a rulesetDiff which contains the operation. 
+ for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := rulesetDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToRulesetApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToRulesetApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (rulesetApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractRulesetFields(r *Ruleset) error { + vSource := r.Source + if vSource == nil { + // note: explicitly not the empty object. + vSource = &RulesetSource{} + } + if err := extractRulesetSourceFields(r, vSource); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSource) { + r.Source = vSource + } + vMetadata := r.Metadata + if vMetadata == nil { + // note: explicitly not the empty object. + vMetadata = &RulesetMetadata{} + } + if err := extractRulesetMetadataFields(r, vMetadata); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetadata) { + r.Metadata = vMetadata + } + return nil +} +func extractRulesetSourceFields(r *Ruleset, o *RulesetSource) error { + return nil +} +func extractRulesetSourceFilesFields(r *Ruleset, o *RulesetSourceFiles) error { + return nil +} +func extractRulesetMetadataFields(r *Ruleset, o *RulesetMetadata) error { + return nil +} + +func postReadExtractRulesetFields(r *Ruleset) error { + vSource := r.Source + if vSource == nil { + // note: explicitly not the empty object. 
+ vSource = &RulesetSource{} + } + if err := postReadExtractRulesetSourceFields(r, vSource); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSource) { + r.Source = vSource + } + vMetadata := r.Metadata + if vMetadata == nil { + // note: explicitly not the empty object. + vMetadata = &RulesetMetadata{} + } + if err := postReadExtractRulesetMetadataFields(r, vMetadata); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetadata) { + r.Metadata = vMetadata + } + return nil +} +func postReadExtractRulesetSourceFields(r *Ruleset, o *RulesetSource) error { + return nil +} +func postReadExtractRulesetSourceFilesFields(r *Ruleset, o *RulesetSourceFiles) error { + return nil +} +func postReadExtractRulesetMetadataFields(r *Ruleset, o *RulesetMetadata) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/client.go b/mmv1/third_party/terraform/services/gkehub/client.go new file mode 100644 index 000000000000..a29d06089889 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/client.go @@ -0,0 +1,18 @@ +package gkehub + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_membership.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_membership.go.tmpl new file mode 100644 index 000000000000..b863f3549e60 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature_membership.go.tmpl @@ -0,0 +1,1643 @@ +package gkehub + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type FeatureMembership struct { + Mesh *FeatureMembershipMesh `json:"mesh"` + Configmanagement *FeatureMembershipConfigmanagement `json:"configmanagement"` + Policycontroller *FeatureMembershipPolicycontroller `json:"policycontroller"` + Project *string `json:"project"` + Location *string `json:"location"` + Feature *string `json:"feature"` + Membership *string `json:"membership"` + MembershipLocation *string `json:"membershipLocation"` +} + +func (r *FeatureMembership) String() string { + return dcl.SprintResource(r) +} + +// The enum FeatureMembershipMeshManagementEnum. +type FeatureMembershipMeshManagementEnum string + +// FeatureMembershipMeshManagementEnumRef returns a *FeatureMembershipMeshManagementEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipMeshManagementEnumRef(s string) *FeatureMembershipMeshManagementEnum { + v := FeatureMembershipMeshManagementEnum(s) + return &v +} + +func (v FeatureMembershipMeshManagementEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipMeshManagementEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipMeshControlPlaneEnum. 
+type FeatureMembershipMeshControlPlaneEnum string + +// FeatureMembershipMeshControlPlaneEnumRef returns a *FeatureMembershipMeshControlPlaneEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipMeshControlPlaneEnumRef(s string) *FeatureMembershipMeshControlPlaneEnum { + v := FeatureMembershipMeshControlPlaneEnum(s) + return &v +} + +func (v FeatureMembershipMeshControlPlaneEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"CONTROL_PLANE_MANAGEMENT_UNSPECIFIED", "AUTOMATIC", "MANUAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipMeshControlPlaneEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum. +type FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum string + +// FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef returns a *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(s string) *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + v := FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(s) + return &v +} + +func (v FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"MONITORING_BACKEND_UNSPECIFIED", "PROMETHEUS", "CLOUD_MONITORING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipConfigmanagementManagementEnum. +type FeatureMembershipConfigmanagementManagementEnum string + +// FeatureMembershipConfigmanagementManagementEnumRef returns a *FeatureMembershipConfigmanagementManagementEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipConfigmanagementManagementEnumRef(s string) *FeatureMembershipConfigmanagementManagementEnum { + v := FeatureMembershipConfigmanagementManagementEnum(s) + return &v +} + +func (v FeatureMembershipConfigmanagementManagementEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipConfigmanagementManagementEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum. +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum string + +// FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum { + v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(s) + return &v +} + +func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"INSTALL_SPEC_UNSPECIFIED", "INSTALL_SPEC_NOT_INSTALLED", "INSTALL_SPEC_ENABLED", "INSTALL_SPEC_SUSPENDED", "INSTALL_SPEC_DETACHED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum. +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum string + +// FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { + v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(s) + return &v +} + +func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"MONITORING_BACKEND_UNSPECIFIED", "PROMETHEUS", "CLOUD_MONITORING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum. +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum string + +// FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum { + v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(s) + return &v +} + +func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"INSTALLATION_UNSPECIFIED", "NOT_INSTALLED", "ALL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum. 
+type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum string + +// FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum { + v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(s) + return &v +} + +func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"AFFINITY_UNSPECIFIED", "NO_AFFINITY", "ANTI_AFFINITY"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum", + Value: string(v), + Valid: []string{}, + } +} + +type FeatureMembershipMesh struct { + empty bool `json:"-"` + Management *FeatureMembershipMeshManagementEnum `json:"management"` + ControlPlane *FeatureMembershipMeshControlPlaneEnum `json:"controlPlane"` +} + +type jsonFeatureMembershipMesh FeatureMembershipMesh + +func (r *FeatureMembershipMesh) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipMesh + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipMesh + } else { + + r.Management = res.Management + + r.ControlPlane = res.ControlPlane + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipMesh is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipMesh *FeatureMembershipMesh = &FeatureMembershipMesh{empty: true} + +func (r *FeatureMembershipMesh) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipMesh) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipMesh) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagement struct { + empty bool `json:"-"` + ConfigSync *FeatureMembershipConfigmanagementConfigSync `json:"configSync"` + PolicyController *FeatureMembershipConfigmanagementPolicyController `json:"policyController"` + HierarchyController *FeatureMembershipConfigmanagementHierarchyController `json:"hierarchyController"` + Version *string `json:"version"` + Management *FeatureMembershipConfigmanagementManagementEnum `json:"management"` +} + +type jsonFeatureMembershipConfigmanagement FeatureMembershipConfigmanagement + +func (r *FeatureMembershipConfigmanagement) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagement + } else { + + r.ConfigSync = res.ConfigSync + + r.PolicyController = res.PolicyController + + r.HierarchyController = res.HierarchyController + + r.Version = res.Version + + r.Management = res.Management + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipConfigmanagement *FeatureMembershipConfigmanagement = &FeatureMembershipConfigmanagement{empty: true} + +func (r *FeatureMembershipConfigmanagement) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagement) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSync struct { + empty bool `json:"-"` + DeploymentOverrides []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides `json:"deploymentOverrides"` + Git *FeatureMembershipConfigmanagementConfigSyncGit `json:"git"` + SourceFormat *string `json:"sourceFormat"` + Enabled *bool `json:"enabled"` + StopSyncing *bool `json:"stopSyncing"` + PreventDrift *bool `json:"preventDrift"` + MetricsGcpServiceAccountEmail *string `json:"metricsGcpServiceAccountEmail"` + Oci *FeatureMembershipConfigmanagementConfigSyncOci `json:"oci"` +} + +type jsonFeatureMembershipConfigmanagementConfigSync FeatureMembershipConfigmanagementConfigSync + +func (r *FeatureMembershipConfigmanagementConfigSync) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSync + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSync + } else { + + r.DeploymentOverrides = res.DeploymentOverrides + + r.Git = res.Git + + r.SourceFormat = res.SourceFormat + + r.Enabled = res.Enabled + + r.StopSyncing = res.StopSyncing + + r.PreventDrift = res.PreventDrift + + r.MetricsGcpServiceAccountEmail = res.MetricsGcpServiceAccountEmail + + r.Oci = res.Oci + + } + return nil +} + +// This object is used to assert a 
desired state where this FeatureMembershipConfigmanagementConfigSync is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSync *FeatureMembershipConfigmanagementConfigSync = &FeatureMembershipConfigmanagementConfigSync{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSync) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSync) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSync) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides struct { + empty bool `json:"-"` + DeploymentName *string `json:"deploymentName"` + DeploymentNamespace *string `json:"deploymentNamespace"` + Containers []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers `json:"containers"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + } else { + + r.DeploymentName = res.DeploymentName + + r.DeploymentNamespace = res.DeploymentNamespace + + r.Containers = res.Containers + + } + return nil +} + +// This object is used to assert a desired state where this 
FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides = &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers struct { + empty bool `json:"-"` + ContainerName *string `json:"containerName"` + CpuRequest *string `json:"cpuRequest"` + MemoryRequest *string `json:"memoryRequest"` + CpuLimit *string `json:"cpuLimit"` + MemoryLimit *string `json:"memoryLimit"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + } else { + + r.ContainerName = res.ContainerName + + r.CpuRequest = res.CpuRequest + + 
r.MemoryRequest = res.MemoryRequest + + r.CpuLimit = res.CpuLimit + + r.MemoryLimit = res.MemoryLimit + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers = &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncGit struct { + empty bool `json:"-"` + SyncRepo *string `json:"syncRepo"` + SyncBranch *string `json:"syncBranch"` + PolicyDir *string `json:"policyDir"` + SyncWaitSecs *string `json:"syncWaitSecs"` + SyncRev *string `json:"syncRev"` + SecretType *string `json:"secretType"` + HttpsProxy *string `json:"httpsProxy"` + GcpServiceAccountEmail *string `json:"gcpServiceAccountEmail"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncGit FeatureMembershipConfigmanagementConfigSyncGit + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncGit + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m 
map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncGit + } else { + + r.SyncRepo = res.SyncRepo + + r.SyncBranch = res.SyncBranch + + r.PolicyDir = res.PolicyDir + + r.SyncWaitSecs = res.SyncWaitSecs + + r.SyncRev = res.SyncRev + + r.SecretType = res.SecretType + + r.HttpsProxy = res.HttpsProxy + + r.GcpServiceAccountEmail = res.GcpServiceAccountEmail + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementConfigSyncGit is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncGit *FeatureMembershipConfigmanagementConfigSyncGit = &FeatureMembershipConfigmanagementConfigSyncGit{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncOci struct { + empty bool `json:"-"` + SyncRepo *string `json:"syncRepo"` + PolicyDir *string `json:"policyDir"` + SyncWaitSecs *string `json:"syncWaitSecs"` + SecretType *string `json:"secretType"` + GcpServiceAccountEmail *string `json:"gcpServiceAccountEmail"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncOci FeatureMembershipConfigmanagementConfigSyncOci + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncOci + if err := json.Unmarshal(data, &res); err != nil { 
+ return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncOci + } else { + + r.SyncRepo = res.SyncRepo + + r.PolicyDir = res.PolicyDir + + r.SyncWaitSecs = res.SyncWaitSecs + + r.SecretType = res.SecretType + + r.GcpServiceAccountEmail = res.GcpServiceAccountEmail + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementConfigSyncOci is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncOci *FeatureMembershipConfigmanagementConfigSyncOci = &FeatureMembershipConfigmanagementConfigSyncOci{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementPolicyController struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + ExemptableNamespaces []string `json:"exemptableNamespaces"` + ReferentialRulesEnabled *bool `json:"referentialRulesEnabled"` + LogDeniesEnabled *bool `json:"logDeniesEnabled"` + MutationEnabled *bool `json:"mutationEnabled"` + Monitoring *FeatureMembershipConfigmanagementPolicyControllerMonitoring `json:"monitoring"` + TemplateLibraryInstalled *bool `json:"templateLibraryInstalled"` + AuditIntervalSeconds *string `json:"auditIntervalSeconds"` +} + +type jsonFeatureMembershipConfigmanagementPolicyController FeatureMembershipConfigmanagementPolicyController + +func (r 
*FeatureMembershipConfigmanagementPolicyController) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementPolicyController + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementPolicyController + } else { + + r.Enabled = res.Enabled + + r.ExemptableNamespaces = res.ExemptableNamespaces + + r.ReferentialRulesEnabled = res.ReferentialRulesEnabled + + r.LogDeniesEnabled = res.LogDeniesEnabled + + r.MutationEnabled = res.MutationEnabled + + r.Monitoring = res.Monitoring + + r.TemplateLibraryInstalled = res.TemplateLibraryInstalled + + r.AuditIntervalSeconds = res.AuditIntervalSeconds + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementPolicyController is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipConfigmanagementPolicyController *FeatureMembershipConfigmanagementPolicyController = &FeatureMembershipConfigmanagementPolicyController{empty: true} + +func (r *FeatureMembershipConfigmanagementPolicyController) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementPolicyController) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementPolicyController) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementPolicyControllerMonitoring struct { + empty bool `json:"-"` + Backends []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum `json:"backends"` +} + +type jsonFeatureMembershipConfigmanagementPolicyControllerMonitoring FeatureMembershipConfigmanagementPolicyControllerMonitoring + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementPolicyControllerMonitoring + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring + } else { + + r.Backends = res.Backends + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementPolicyControllerMonitoring is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring *FeatureMembershipConfigmanagementPolicyControllerMonitoring = &FeatureMembershipConfigmanagementPolicyControllerMonitoring{empty: true} + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementHierarchyController struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + EnablePodTreeLabels *bool `json:"enablePodTreeLabels"` + EnableHierarchicalResourceQuota *bool `json:"enableHierarchicalResourceQuota"` +} + +type jsonFeatureMembershipConfigmanagementHierarchyController FeatureMembershipConfigmanagementHierarchyController + +func (r *FeatureMembershipConfigmanagementHierarchyController) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementHierarchyController + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementHierarchyController + } else { + + r.Enabled = res.Enabled + + r.EnablePodTreeLabels = res.EnablePodTreeLabels + + r.EnableHierarchicalResourceQuota = res.EnableHierarchicalResourceQuota + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementHierarchyController is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipConfigmanagementHierarchyController *FeatureMembershipConfigmanagementHierarchyController = &FeatureMembershipConfigmanagementHierarchyController{empty: true} + +func (r *FeatureMembershipConfigmanagementHierarchyController) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementHierarchyController) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementHierarchyController) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontroller struct { + empty bool `json:"-"` + Version *string `json:"version"` + PolicyControllerHubConfig *FeatureMembershipPolicycontrollerPolicyControllerHubConfig `json:"policyControllerHubConfig"` +} + +type jsonFeatureMembershipPolicycontroller FeatureMembershipPolicycontroller + +func (r *FeatureMembershipPolicycontroller) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontroller + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontroller + } else { + + r.Version = res.Version + + r.PolicyControllerHubConfig = res.PolicyControllerHubConfig + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontroller is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipPolicycontroller *FeatureMembershipPolicycontroller = &FeatureMembershipPolicycontroller{empty: true} + +func (r *FeatureMembershipPolicycontroller) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontroller) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontroller) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfig struct { + empty bool `json:"-"` + InstallSpec *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum `json:"installSpec"` + ExemptableNamespaces []string `json:"exemptableNamespaces"` + ReferentialRulesEnabled *bool `json:"referentialRulesEnabled"` + LogDeniesEnabled *bool `json:"logDeniesEnabled"` + MutationEnabled *bool `json:"mutationEnabled"` + Monitoring *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring `json:"monitoring"` + AuditIntervalSeconds *int64 `json:"auditIntervalSeconds"` + ConstraintViolationLimit *int64 `json:"constraintViolationLimit"` + PolicyContent *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent `json:"policyContent"` + DeploymentConfigs map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs `json:"deploymentConfigs"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfig FeatureMembershipPolicycontrollerPolicyControllerHubConfig + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + } else { + + r.InstallSpec = res.InstallSpec + + r.ExemptableNamespaces = res.ExemptableNamespaces + + r.ReferentialRulesEnabled = res.ReferentialRulesEnabled + + r.LogDeniesEnabled = res.LogDeniesEnabled + + r.MutationEnabled = res.MutationEnabled + + r.Monitoring = res.Monitoring + + r.AuditIntervalSeconds = res.AuditIntervalSeconds + + r.ConstraintViolationLimit = res.ConstraintViolationLimit + + r.PolicyContent = res.PolicyContent + + r.DeploymentConfigs = res.DeploymentConfigs + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig *FeatureMembershipPolicycontrollerPolicyControllerHubConfig = &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring struct { + empty bool `json:"-"` + Backends []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum `json:"backends"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + +func (r 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + } else { + + r.Backends = res.Backends + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent struct { + empty bool `json:"-"` + TemplateLibrary *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary `json:"templateLibrary"` + Bundles map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles `json:"bundles"` +} + +type 
jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + } else { + + r.TemplateLibrary = res.TemplateLibrary + + r.Bundles = res.Bundles + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary struct { + empty bool `json:"-"` + Installation 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum `json:"installation"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + } else { + + r.Installation = res.Installation + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles struct { + empty bool `json:"-"` + ExemptedNamespaces []string `json:"exemptedNamespaces"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + } else { + + r.ExemptedNamespaces = res.ExemptedNamespaces + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs struct { + empty bool `json:"-"` + ReplicaCount *int64 `json:"replicaCount"` + ContainerResources *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources `json:"containerResources"` + PodAffinity *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum `json:"podAffinity"` + PodTolerations []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations `json:"podTolerations"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + if err := json.Unmarshal(data, &res); err != nil { + return err + } + 
+ var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + } else { + + r.ReplicaCount = res.ReplicaCount + + r.ContainerResources = res.ContainerResources + + r.PodAffinity = res.PodAffinity + + r.PodTolerations = res.PodTolerations + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources struct { + empty bool `json:"-"` + Limits *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits `json:"limits"` + Requests *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests `json:"requests"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources 
FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } else { + + r.Limits = res.Limits + + r.Requests = res.Requests + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type 
FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits struct { + empty bool `json:"-"` + Memory *string `json:"memory"` + Cpu *string `json:"cpu"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } else { + + r.Memory = res.Memory + + r.Cpu = res.Cpu + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests struct { + empty bool `json:"-"` + Memory *string `json:"memory"` + Cpu *string `json:"cpu"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } else { + + r.Memory = res.Memory + + r.Cpu = res.Cpu + + } + 
return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations struct { + empty bool `json:"-"` + Key *string `json:"key"` + Operator *string `json:"operator"` + Value *string `json:"value"` + Effect *string `json:"effect"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) UnmarshalJSON(data []byte) error { + var res 
jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + } else { + + r.Key = res.Key + + r.Operator = res.Operator + + r.Value = res.Value + + r.Effect = res.Effect + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *FeatureMembership) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "gke_hub", + Type: "FeatureMembership", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "gkehub", +{{- end }} + } +} + +func (r *FeatureMembership) ID() (string, error) { + if err := extractFeatureMembershipFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "mesh": dcl.ValueOrEmptyString(nr.Mesh), + "configmanagement": dcl.ValueOrEmptyString(nr.Configmanagement), + "policycontroller": dcl.ValueOrEmptyString(nr.Policycontroller), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + "membership": dcl.ValueOrEmptyString(nr.Membership), + "membership_location": dcl.ValueOrEmptyString(nr.MembershipLocation), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/memberships/{{ "{{" }}membership{{ "}}" }}", params), nil +} + +const FeatureMembershipMaxPage = -1 + +type FeatureMembershipList struct { + Items []*FeatureMembership + + nextToken string + + resource *FeatureMembership +} + +func (c *Client) DeleteFeatureMembership(ctx context.Context, r *FeatureMembership) error { + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 409: dcl.Retryability{ + Retryable: true, + Pattern: "", + Timeout: 60000000000, + }, + }))) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("FeatureMembership resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting FeatureMembership...") + deleteOp := deleteFeatureMembershipOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllFeatureMembership deletes all resources that the filter functions 
returns true on. +func (c *Client) DeleteAllFeatureMembership(ctx context.Context, project, location, feature string, filter func(*FeatureMembership) bool) error { + listObj, err := c.ListFeatureMembership(ctx, project, location, feature) + if err != nil { + return err + } + + err = c.deleteAllFeatureMembership(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllFeatureMembership(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyFeatureMembership(ctx context.Context, rawDesired *FeatureMembership, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 409: dcl.Retryability{ + Retryable: true, + Pattern: "", + Timeout: 60000000000, + }, + }))) + var resultNewState *FeatureMembership + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyFeatureMembershipHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyFeatureMembershipHelper(c *Client, ctx context.Context, rawDesired *FeatureMembership, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyFeatureMembership...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractFeatureMembershipFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.featureMembershipDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToFeatureMembershipDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []featureMembershipApiOperation + if create { + ops = append(ops, &createFeatureMembershipOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: 
%v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyFeatureMembershipDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyFeatureMembershipDiff(c *Client, ctx context.Context, desired *FeatureMembership, rawDesired *FeatureMembership, ops []featureMembershipApiOperation, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetFeatureMembership(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createFeatureMembershipOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapFeatureMembership(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeFeatureMembershipNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeFeatureMembershipNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. 
+ // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeFeatureMembershipDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractFeatureMembershipFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. + if err := postReadExtractFeatureMembershipFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffFeatureMembership(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_membership_internal.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_membership_internal.go.tmpl new file mode 100644 index 000000000000..84eb94af8b06 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature_membership_internal.go.tmpl @@ -0,0 +1,8174 @@ +package gkehub + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *FeatureMembership) validate() error { + + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Feature, "Feature"); err != nil { + return err + } + if err := 
dcl.RequiredParameter(r.Membership, "Membership"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Mesh) { + if err := r.Mesh.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Configmanagement) { + if err := r.Configmanagement.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Policycontroller) { + if err := r.Policycontroller.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipMesh) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagement) validate() error { + if !dcl.IsEmptyValueIndirect(r.ConfigSync) { + if err := r.ConfigSync.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PolicyController) { + if err := r.PolicyController.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.HierarchyController) { + if err := r.HierarchyController.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSync) validate() error { + if !dcl.IsEmptyValueIndirect(r.Git) { + if err := r.Git.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Oci) { + if err := r.Oci.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncGit) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncOci) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementPolicyController) validate() error { + if !dcl.IsEmptyValueIndirect(r.Monitoring) { + if err := r.Monitoring.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) 
validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementHierarchyController) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontroller) validate() error { + if err := dcl.Required(r, "policyControllerHubConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.PolicyControllerHubConfig) { + if err := r.PolicyControllerHubConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.Monitoring) { + if err := r.Monitoring.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PolicyContent) { + if err := r.PolicyContent.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) validate() error { + if !dcl.IsEmptyValueIndirect(r.TemplateLibrary) { + if err := r.TemplateLibrary.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) validate() error { + if err := dcl.ValidateAtLeastOneOfFieldsSet([]string{"ReplicaCount", "ContainerResources", "PodAffinity", "PodTolerations"}, r.ReplicaCount, r.ContainerResources, r.PodAffinity, r.PodTolerations); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.ContainerResources) { + if err := r.ContainerResources.validate(); err != nil { + return err + } + } + return nil +} +func (r 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) validate() error { + if !dcl.IsEmptyValueIndirect(r.Limits) { + if err := r.Limits.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Requests) { + if err := r.Requests.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) validate() error { + return nil +} +func (r *FeatureMembership) basePath() string { + params := map[string]interface{}{} +{{- if ne $.TargetVersionName "ga" }} + return dcl.Nprintf("https://gkehub.googleapis.com/v1beta1/", params) +{{- else }} + return dcl.Nprintf("https://gkehub.googleapis.com/v1/", params) +{{- end }} +} + +func (r *FeatureMembership) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *FeatureMembership) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, 
params), nil + +} + +func (r *FeatureMembership) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *FeatureMembership) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// featureMembershipApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type featureMembershipApiOperation interface { + do(context.Context, *FeatureMembership, *Client) error +} + +// newUpdateFeatureMembershipUpdateFeatureMembershipRequest creates a request for an +// FeatureMembership resource's UpdateFeatureMembership update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateFeatureMembershipUpdateFeatureMembershipRequest(ctx context.Context, f *FeatureMembership, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := expandFeatureMembershipMesh(c, f.Mesh, res); err != nil { + return nil, fmt.Errorf("error expanding Mesh into mesh: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["mesh"] = v + } + if v, err := expandFeatureMembershipConfigmanagement(c, f.Configmanagement, res); err != nil { + return nil, fmt.Errorf("error expanding Configmanagement into configmanagement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["configmanagement"] = v + } + if v, err := expandFeatureMembershipPolicycontroller(c, f.Policycontroller, res); err != nil { + return nil, fmt.Errorf("error expanding Policycontroller into policycontroller: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["policycontroller"] = v + } + return req, nil +} + +// marshalUpdateFeatureMembershipUpdateFeatureMembershipRequest converts the update into +// the final JSON request body. +func marshalUpdateFeatureMembershipUpdateFeatureMembershipRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateFeatureMembershipUpdateFeatureMembershipOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (c *Client) deleteAllFeatureMembership(ctx context.Context, f func(*FeatureMembership) bool, resources []*FeatureMembership) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteFeatureMembership(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteFeatureMembershipOperation struct{} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createFeatureMembershipOperation struct { + response map[string]interface{} +} + +func (op *createFeatureMembershipOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (c *Client) featureMembershipDiffsForRawDesired(ctx context.Context, rawDesired *FeatureMembership, opts ...dcl.ApplyOption) (initial, desired *FeatureMembership, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *FeatureMembership + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*FeatureMembership); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected FeatureMembership, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetFeatureMembership(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a FeatureMembership resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve FeatureMembership resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that FeatureMembership resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeFeatureMembershipDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for FeatureMembership: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for FeatureMembership: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractFeatureMembershipFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeFeatureMembershipInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for FeatureMembership: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeFeatureMembershipDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for FeatureMembership: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffFeatureMembership(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeFeatureMembershipInitialState(rawInitial, rawDesired *FeatureMembership) (*FeatureMembership, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeFeatureMembershipDesiredState(rawDesired, rawInitial *FeatureMembership, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Mesh = canonicalizeFeatureMembershipMesh(rawDesired.Mesh, nil, opts...) + rawDesired.Configmanagement = canonicalizeFeatureMembershipConfigmanagement(rawDesired.Configmanagement, nil, opts...) + rawDesired.Policycontroller = canonicalizeFeatureMembershipPolicycontroller(rawDesired.Policycontroller, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &FeatureMembership{} + canonicalDesired.Mesh = canonicalizeFeatureMembershipMesh(rawDesired.Mesh, rawInitial.Mesh, opts...) + canonicalDesired.Configmanagement = canonicalizeFeatureMembershipConfigmanagement(rawDesired.Configmanagement, rawInitial.Configmanagement, opts...) + canonicalDesired.Policycontroller = canonicalizeFeatureMembershipPolicycontroller(rawDesired.Policycontroller, rawInitial.Policycontroller, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Feature, rawInitial.Feature) { + canonicalDesired.Feature = rawInitial.Feature + } else { + canonicalDesired.Feature = rawDesired.Feature + } + if dcl.NameToSelfLink(rawDesired.Membership, rawInitial.Membership) { + canonicalDesired.Membership = rawInitial.Membership + } else { + canonicalDesired.Membership = rawDesired.Membership + } + if dcl.NameToSelfLink(rawDesired.MembershipLocation, rawInitial.MembershipLocation) { + canonicalDesired.MembershipLocation = rawInitial.MembershipLocation + } else { + canonicalDesired.MembershipLocation = rawDesired.MembershipLocation + } + return canonicalDesired, nil +} + +func canonicalizeFeatureMembershipNewState(c *Client, rawNew, rawDesired *FeatureMembership) (*FeatureMembership, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Mesh) && dcl.IsEmptyValueIndirect(rawDesired.Mesh) { + rawNew.Mesh = rawDesired.Mesh + } else { + rawNew.Mesh = canonicalizeNewFeatureMembershipMesh(c, rawDesired.Mesh, rawNew.Mesh) + } + + if dcl.IsEmptyValueIndirect(rawNew.Configmanagement) && dcl.IsEmptyValueIndirect(rawDesired.Configmanagement) { + rawNew.Configmanagement = rawDesired.Configmanagement + } else { + rawNew.Configmanagement = canonicalizeNewFeatureMembershipConfigmanagement(c, rawDesired.Configmanagement, rawNew.Configmanagement) + } + + if dcl.IsEmptyValueIndirect(rawNew.Policycontroller) && dcl.IsEmptyValueIndirect(rawDesired.Policycontroller) { + rawNew.Policycontroller = rawDesired.Policycontroller + } else { + rawNew.Policycontroller = canonicalizeNewFeatureMembershipPolicycontroller(c, rawDesired.Policycontroller, rawNew.Policycontroller) + 
} + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Feature = rawDesired.Feature + + rawNew.Membership = rawDesired.Membership + + rawNew.MembershipLocation = rawDesired.MembershipLocation + + return rawNew, nil +} + +func canonicalizeFeatureMembershipMesh(des, initial *FeatureMembershipMesh, opts ...dcl.ApplyOption) *FeatureMembershipMesh { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipMesh{} + + if dcl.IsZeroValue(des.Management) || (dcl.IsEmptyValueIndirect(des.Management) && dcl.IsEmptyValueIndirect(initial.Management)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Management = initial.Management + } else { + cDes.Management = des.Management + } + if dcl.IsZeroValue(des.ControlPlane) || (dcl.IsEmptyValueIndirect(des.ControlPlane) && dcl.IsEmptyValueIndirect(initial.ControlPlane)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ControlPlane = initial.ControlPlane + } else { + cDes.ControlPlane = des.ControlPlane + } + + return cDes +} + +func canonicalizeFeatureMembershipMeshSlice(des, initial []FeatureMembershipMesh, opts ...dcl.ApplyOption) []FeatureMembershipMesh { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipMesh, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipMesh(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipMesh, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipMesh(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipMesh(c *Client, des, nw *FeatureMembershipMesh) *FeatureMembershipMesh { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipMesh while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewFeatureMembershipMeshSet(c *Client, des, nw []FeatureMembershipMesh) []FeatureMembershipMesh { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipMesh + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipMeshNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipMesh(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipMeshSlice(c *Client, des, nw []FeatureMembershipMesh) []FeatureMembershipMesh { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipMesh + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipMesh(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagement(des, initial *FeatureMembershipConfigmanagement, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagement{} + + cDes.ConfigSync = canonicalizeFeatureMembershipConfigmanagementConfigSync(des.ConfigSync, initial.ConfigSync, opts...) + cDes.PolicyController = canonicalizeFeatureMembershipConfigmanagementPolicyController(des.PolicyController, initial.PolicyController, opts...) + cDes.HierarchyController = canonicalizeFeatureMembershipConfigmanagementHierarchyController(des.HierarchyController, initial.HierarchyController, opts...) + if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { + cDes.Version = initial.Version + } else { + cDes.Version = des.Version + } + if dcl.IsZeroValue(des.Management) || (dcl.IsEmptyValueIndirect(des.Management) && dcl.IsEmptyValueIndirect(initial.Management)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Management = initial.Management + } else { + cDes.Management = des.Management + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementSlice(des, initial []FeatureMembershipConfigmanagement, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagement, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagement(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagement, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagement(c *Client, des, nw *FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ConfigSync = canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c, des.ConfigSync, nw.ConfigSync) + nw.PolicyController = canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c, des.PolicyController, nw.PolicyController) + nw.HierarchyController = canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c, des.HierarchyController, nw.HierarchyController) + if dcl.StringCanonicalize(des.Version, nw.Version) { + nw.Version = des.Version + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementSet(c *Client, des, nw []FeatureMembershipConfigmanagement) []FeatureMembershipConfigmanagement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementSlice(c *Client, des, nw []FeatureMembershipConfigmanagement) []FeatureMembershipConfigmanagement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagement(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSync(des, initial *FeatureMembershipConfigmanagementConfigSync, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSync{} + + cDes.DeploymentOverrides = canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(des.DeploymentOverrides, initial.DeploymentOverrides, opts...) + cDes.Git = canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(des.Git, initial.Git, opts...) 
+ if dcl.StringCanonicalize(des.SourceFormat, initial.SourceFormat) || dcl.IsZeroValue(des.SourceFormat) { + cDes.SourceFormat = initial.SourceFormat + } else { + cDes.SourceFormat = des.SourceFormat + } + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.BoolCanonicalize(des.StopSyncing, initial.StopSyncing) || dcl.IsZeroValue(des.StopSyncing) { + cDes.StopSyncing = initial.StopSyncing + } else { + cDes.StopSyncing = des.StopSyncing + } + if dcl.BoolCanonicalize(des.PreventDrift, initial.PreventDrift) || dcl.IsZeroValue(des.PreventDrift) { + cDes.PreventDrift = initial.PreventDrift + } else { + cDes.PreventDrift = des.PreventDrift + } + if dcl.IsZeroValue(des.MetricsGcpServiceAccountEmail) || (dcl.IsEmptyValueIndirect(des.MetricsGcpServiceAccountEmail) && dcl.IsEmptyValueIndirect(initial.MetricsGcpServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MetricsGcpServiceAccountEmail = initial.MetricsGcpServiceAccountEmail + } else { + cDes.MetricsGcpServiceAccountEmail = des.MetricsGcpServiceAccountEmail + } + cDes.Oci = canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(des.Oci, initial.Oci, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncSlice(des, initial []FeatureMembershipConfigmanagementConfigSync, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSync, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSync(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSync, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSync(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSync) *FeatureMembershipConfigmanagementConfigSync { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSync while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.DeploymentOverrides = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c, des.DeploymentOverrides, nw.DeploymentOverrides) + nw.Git = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c, des.Git, nw.Git) + if dcl.StringCanonicalize(des.SourceFormat, nw.SourceFormat) { + nw.SourceFormat = des.SourceFormat + } + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.BoolCanonicalize(des.StopSyncing, nw.StopSyncing) { + nw.StopSyncing = des.StopSyncing + } + if dcl.BoolCanonicalize(des.PreventDrift, nw.PreventDrift) { + nw.PreventDrift = des.PreventDrift + } + nw.Oci = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c, des.Oci, nw.Oci) + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSync) []FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagementConfigSync + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSync) []FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSync + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(des, initial *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + + if dcl.StringCanonicalize(des.DeploymentName, initial.DeploymentName) || dcl.IsZeroValue(des.DeploymentName) { + cDes.DeploymentName = initial.DeploymentName + } else { + cDes.DeploymentName = des.DeploymentName + } + if dcl.StringCanonicalize(des.DeploymentNamespace, initial.DeploymentNamespace) || dcl.IsZeroValue(des.DeploymentNamespace) { + 
cDes.DeploymentNamespace = initial.DeploymentNamespace + } else { + cDes.DeploymentNamespace = des.DeploymentNamespace + } + cDes.Containers = canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(des.Containers, initial.Containers, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.DeploymentName, nw.DeploymentName) { + nw.DeploymentName = des.DeploymentName + } + if dcl.StringCanonicalize(des.DeploymentNamespace, nw.DeploymentNamespace) { + nw.DeploymentNamespace = des.DeploymentNamespace + } + nw.Containers = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c, des.Containers, nw.Containers) + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(des, initial *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + + if dcl.StringCanonicalize(des.ContainerName, initial.ContainerName) || dcl.IsZeroValue(des.ContainerName) { + cDes.ContainerName = initial.ContainerName + } else { + cDes.ContainerName = des.ContainerName + } + if dcl.StringCanonicalize(des.CpuRequest, initial.CpuRequest) || dcl.IsZeroValue(des.CpuRequest) { + cDes.CpuRequest = initial.CpuRequest + } else { + cDes.CpuRequest = des.CpuRequest + } + if dcl.StringCanonicalize(des.MemoryRequest, initial.MemoryRequest) || dcl.IsZeroValue(des.MemoryRequest) { + cDes.MemoryRequest = initial.MemoryRequest + } else { + cDes.MemoryRequest = des.MemoryRequest + } + if dcl.StringCanonicalize(des.CpuLimit, initial.CpuLimit) || dcl.IsZeroValue(des.CpuLimit) { + cDes.CpuLimit = initial.CpuLimit + } else { + cDes.CpuLimit = des.CpuLimit + } + if dcl.StringCanonicalize(des.MemoryLimit, initial.MemoryLimit) || dcl.IsZeroValue(des.MemoryLimit) { + cDes.MemoryLimit = initial.MemoryLimit + } else { + cDes.MemoryLimit = des.MemoryLimit + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, opts 
...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ContainerName, nw.ContainerName) { + nw.ContainerName = des.ContainerName + } + if dcl.StringCanonicalize(des.CpuRequest, nw.CpuRequest) { + nw.CpuRequest = des.CpuRequest + } + if dcl.StringCanonicalize(des.MemoryRequest, nw.MemoryRequest) { + nw.MemoryRequest = des.MemoryRequest + } + if dcl.StringCanonicalize(des.CpuLimit, nw.CpuLimit) { + nw.CpuLimit = des.CpuLimit + } + if dcl.StringCanonicalize(des.MemoryLimit, nw.MemoryLimit) { + nw.MemoryLimit = des.MemoryLimit + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(des, initial *FeatureMembershipConfigmanagementConfigSyncGit, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncGit { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncGit{} + + if dcl.StringCanonicalize(des.SyncRepo, initial.SyncRepo) || dcl.IsZeroValue(des.SyncRepo) { + cDes.SyncRepo = initial.SyncRepo + } else { + cDes.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.SyncBranch, initial.SyncBranch) || dcl.IsZeroValue(des.SyncBranch) { + cDes.SyncBranch = initial.SyncBranch + } else { + cDes.SyncBranch = des.SyncBranch + } + if dcl.StringCanonicalize(des.PolicyDir, initial.PolicyDir) || dcl.IsZeroValue(des.PolicyDir) { + cDes.PolicyDir = initial.PolicyDir + } else { + cDes.PolicyDir = des.PolicyDir + } + if dcl.StringCanonicalize(des.SyncWaitSecs, initial.SyncWaitSecs) || dcl.IsZeroValue(des.SyncWaitSecs) { + cDes.SyncWaitSecs = initial.SyncWaitSecs + } else { + cDes.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SyncRev, initial.SyncRev) || dcl.IsZeroValue(des.SyncRev) { + 
cDes.SyncRev = initial.SyncRev + } else { + cDes.SyncRev = des.SyncRev + } + if dcl.StringCanonicalize(des.SecretType, initial.SecretType) || dcl.IsZeroValue(des.SecretType) { + cDes.SecretType = initial.SecretType + } else { + cDes.SecretType = des.SecretType + } + if dcl.StringCanonicalize(des.HttpsProxy, initial.HttpsProxy) || dcl.IsZeroValue(des.HttpsProxy) { + cDes.HttpsProxy = initial.HttpsProxy + } else { + cDes.HttpsProxy = des.HttpsProxy + } + if dcl.IsZeroValue(des.GcpServiceAccountEmail) || (dcl.IsEmptyValueIndirect(des.GcpServiceAccountEmail) && dcl.IsEmptyValueIndirect(initial.GcpServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.GcpServiceAccountEmail = initial.GcpServiceAccountEmail + } else { + cDes.GcpServiceAccountEmail = des.GcpServiceAccountEmail + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncGitSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncGit, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncGit { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncGit, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncGit, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncGit) *FeatureMembershipConfigmanagementConfigSyncGit { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncGit while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SyncRepo, nw.SyncRepo) { + nw.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.SyncBranch, nw.SyncBranch) { + nw.SyncBranch = des.SyncBranch + } + if dcl.StringCanonicalize(des.PolicyDir, nw.PolicyDir) { + nw.PolicyDir = des.PolicyDir + } + if dcl.StringCanonicalize(des.SyncWaitSecs, nw.SyncWaitSecs) { + nw.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SyncRev, nw.SyncRev) { + nw.SyncRev = des.SyncRev + } + if dcl.StringCanonicalize(des.SecretType, nw.SecretType) { + nw.SecretType = des.SecretType + } + if dcl.StringCanonicalize(des.HttpsProxy, nw.HttpsProxy) { + nw.HttpsProxy = des.HttpsProxy + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGitSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncGit) []FeatureMembershipConfigmanagementConfigSyncGit { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagementConfigSyncGit + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncGitNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGitSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncGit) []FeatureMembershipConfigmanagementConfigSyncGit { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncGit + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(des, initial *FeatureMembershipConfigmanagementConfigSyncOci, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncOci { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncOci{} + + if dcl.StringCanonicalize(des.SyncRepo, initial.SyncRepo) || dcl.IsZeroValue(des.SyncRepo) { + cDes.SyncRepo = initial.SyncRepo + } else { + cDes.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.PolicyDir, initial.PolicyDir) || dcl.IsZeroValue(des.PolicyDir) { + cDes.PolicyDir = initial.PolicyDir + } else { + cDes.PolicyDir = des.PolicyDir + } + if 
dcl.StringCanonicalize(des.SyncWaitSecs, initial.SyncWaitSecs) || dcl.IsZeroValue(des.SyncWaitSecs) { + cDes.SyncWaitSecs = initial.SyncWaitSecs + } else { + cDes.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SecretType, initial.SecretType) || dcl.IsZeroValue(des.SecretType) { + cDes.SecretType = initial.SecretType + } else { + cDes.SecretType = des.SecretType + } + if dcl.IsZeroValue(des.GcpServiceAccountEmail) || (dcl.IsEmptyValueIndirect(des.GcpServiceAccountEmail) && dcl.IsEmptyValueIndirect(initial.GcpServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.GcpServiceAccountEmail = initial.GcpServiceAccountEmail + } else { + cDes.GcpServiceAccountEmail = des.GcpServiceAccountEmail + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncOciSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncOci, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncOci { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncOci, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncOci, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncOci) *FeatureMembershipConfigmanagementConfigSyncOci { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncOci while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SyncRepo, nw.SyncRepo) { + nw.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.PolicyDir, nw.PolicyDir) { + nw.PolicyDir = des.PolicyDir + } + if dcl.StringCanonicalize(des.SyncWaitSecs, nw.SyncWaitSecs) { + nw.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SecretType, nw.SecretType) { + nw.SecretType = des.SecretType + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOciSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncOci) []FeatureMembershipConfigmanagementConfigSyncOci { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipConfigmanagementConfigSyncOci + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncOciNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOciSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncOci) []FeatureMembershipConfigmanagementConfigSyncOci { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncOci + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementPolicyController(des, initial *FeatureMembershipConfigmanagementPolicyController, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementPolicyController { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementPolicyController{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, initial.ExemptableNamespaces) { + cDes.ExemptableNamespaces = initial.ExemptableNamespaces + } else { + cDes.ExemptableNamespaces = des.ExemptableNamespaces + } + if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, initial.ReferentialRulesEnabled) || dcl.IsZeroValue(des.ReferentialRulesEnabled) { + cDes.ReferentialRulesEnabled = initial.ReferentialRulesEnabled + } else { + cDes.ReferentialRulesEnabled = des.ReferentialRulesEnabled + } + if dcl.BoolCanonicalize(des.LogDeniesEnabled, initial.LogDeniesEnabled) || dcl.IsZeroValue(des.LogDeniesEnabled) { + cDes.LogDeniesEnabled = initial.LogDeniesEnabled + } else { + cDes.LogDeniesEnabled = des.LogDeniesEnabled + } + if dcl.BoolCanonicalize(des.MutationEnabled, initial.MutationEnabled) || 
dcl.IsZeroValue(des.MutationEnabled) { + cDes.MutationEnabled = initial.MutationEnabled + } else { + cDes.MutationEnabled = des.MutationEnabled + } + cDes.Monitoring = canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(des.Monitoring, initial.Monitoring, opts...) + if dcl.BoolCanonicalize(des.TemplateLibraryInstalled, initial.TemplateLibraryInstalled) || dcl.IsZeroValue(des.TemplateLibraryInstalled) { + cDes.TemplateLibraryInstalled = initial.TemplateLibraryInstalled + } else { + cDes.TemplateLibraryInstalled = des.TemplateLibraryInstalled + } + if dcl.StringCanonicalize(des.AuditIntervalSeconds, initial.AuditIntervalSeconds) || dcl.IsZeroValue(des.AuditIntervalSeconds) { + cDes.AuditIntervalSeconds = initial.AuditIntervalSeconds + } else { + cDes.AuditIntervalSeconds = des.AuditIntervalSeconds + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementPolicyControllerSlice(des, initial []FeatureMembershipConfigmanagementPolicyController, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementPolicyController { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementPolicyController, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementPolicyController(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementPolicyController, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementPolicyController(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c *Client, des, nw *FeatureMembershipConfigmanagementPolicyController) *FeatureMembershipConfigmanagementPolicyController { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementPolicyController while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, nw.ExemptableNamespaces) { + nw.ExemptableNamespaces = des.ExemptableNamespaces + } + if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, nw.ReferentialRulesEnabled) { + nw.ReferentialRulesEnabled = des.ReferentialRulesEnabled + } + if dcl.BoolCanonicalize(des.LogDeniesEnabled, nw.LogDeniesEnabled) { + nw.LogDeniesEnabled = des.LogDeniesEnabled + } + if dcl.BoolCanonicalize(des.MutationEnabled, nw.MutationEnabled) { + nw.MutationEnabled = des.MutationEnabled + } + nw.Monitoring = canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, des.Monitoring, nw.Monitoring) + if dcl.BoolCanonicalize(des.TemplateLibraryInstalled, nw.TemplateLibraryInstalled) { + nw.TemplateLibraryInstalled = des.TemplateLibraryInstalled + } + if dcl.StringCanonicalize(des.AuditIntervalSeconds, nw.AuditIntervalSeconds) { + nw.AuditIntervalSeconds = des.AuditIntervalSeconds + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerSet(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyController) []FeatureMembershipConfigmanagementPolicyController { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagementPolicyController + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementPolicyControllerNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerSlice(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyController) []FeatureMembershipConfigmanagementPolicyController { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementPolicyController + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(des, initial *FeatureMembershipConfigmanagementPolicyControllerMonitoring, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + + if dcl.IsZeroValue(des.Backends) || (dcl.IsEmptyValueIndirect(des.Backends) && dcl.IsEmptyValueIndirect(initial.Backends)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Backends = initial.Backends + } else { + cDes.Backends = des.Backends + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(des, initial []FeatureMembershipConfigmanagementPolicyControllerMonitoring, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoring, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoring, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c *Client, des, nw *FeatureMembershipConfigmanagementPolicyControllerMonitoring) *FeatureMembershipConfigmanagementPolicyControllerMonitoring { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementPolicyControllerMonitoring while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoringSet(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyControllerMonitoring) []FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagementPolicyControllerMonitoring + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementPolicyControllerMonitoringNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyControllerMonitoring) []FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementPolicyControllerMonitoring + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementHierarchyController(des, initial *FeatureMembershipConfigmanagementHierarchyController, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementHierarchyController { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementHierarchyController{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.BoolCanonicalize(des.EnablePodTreeLabels, initial.EnablePodTreeLabels) || dcl.IsZeroValue(des.EnablePodTreeLabels) { + cDes.EnablePodTreeLabels = initial.EnablePodTreeLabels + } else { + cDes.EnablePodTreeLabels = des.EnablePodTreeLabels + } + if dcl.BoolCanonicalize(des.EnableHierarchicalResourceQuota, initial.EnableHierarchicalResourceQuota) || dcl.IsZeroValue(des.EnableHierarchicalResourceQuota) { + cDes.EnableHierarchicalResourceQuota = initial.EnableHierarchicalResourceQuota + } else { + cDes.EnableHierarchicalResourceQuota = des.EnableHierarchicalResourceQuota + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementHierarchyControllerSlice(des, initial []FeatureMembershipConfigmanagementHierarchyController, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementHierarchyController { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementHierarchyController, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementHierarchyController(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementHierarchyController, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementHierarchyController(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c *Client, des, nw *FeatureMembershipConfigmanagementHierarchyController) *FeatureMembershipConfigmanagementHierarchyController { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementHierarchyController while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.BoolCanonicalize(des.EnablePodTreeLabels, nw.EnablePodTreeLabels) { + nw.EnablePodTreeLabels = des.EnablePodTreeLabels + } + if dcl.BoolCanonicalize(des.EnableHierarchicalResourceQuota, nw.EnableHierarchicalResourceQuota) { + nw.EnableHierarchicalResourceQuota = des.EnableHierarchicalResourceQuota + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementHierarchyControllerSet(c *Client, des, nw []FeatureMembershipConfigmanagementHierarchyController) []FeatureMembershipConfigmanagementHierarchyController { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagementHierarchyController + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementHierarchyControllerSlice(c *Client, des, nw []FeatureMembershipConfigmanagementHierarchyController) []FeatureMembershipConfigmanagementHierarchyController { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementHierarchyController + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontroller(des, initial *FeatureMembershipPolicycontroller, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontroller { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontroller{} + + if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { + cDes.Version = initial.Version + } else { + cDes.Version = des.Version + } + cDes.PolicyControllerHubConfig = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(des.PolicyControllerHubConfig, initial.PolicyControllerHubConfig, opts...) 
+ + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerSlice(des, initial []FeatureMembershipPolicycontroller, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontroller { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontroller, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontroller(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontroller, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontroller(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontroller(c *Client, des, nw *FeatureMembershipPolicycontroller) *FeatureMembershipPolicycontroller { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontroller while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Version, nw.Version) { + nw.Version = des.Version + } + nw.PolicyControllerHubConfig = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, des.PolicyControllerHubConfig, nw.PolicyControllerHubConfig) + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerSet(c *Client, des, nw []FeatureMembershipPolicycontroller) []FeatureMembershipPolicycontroller { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontroller + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontroller(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerSlice(c *Client, des, nw []FeatureMembershipPolicycontroller) []FeatureMembershipPolicycontroller { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontroller + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontroller(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfig, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + + if dcl.IsZeroValue(des.InstallSpec) || (dcl.IsEmptyValueIndirect(des.InstallSpec) && dcl.IsEmptyValueIndirect(initial.InstallSpec)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.InstallSpec = initial.InstallSpec + } else { + cDes.InstallSpec = des.InstallSpec + } + if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, initial.ExemptableNamespaces) { + cDes.ExemptableNamespaces = initial.ExemptableNamespaces + } else { + cDes.ExemptableNamespaces = des.ExemptableNamespaces + } + if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, initial.ReferentialRulesEnabled) || dcl.IsZeroValue(des.ReferentialRulesEnabled) { + cDes.ReferentialRulesEnabled = initial.ReferentialRulesEnabled + } else { + cDes.ReferentialRulesEnabled = des.ReferentialRulesEnabled + } + if dcl.BoolCanonicalize(des.LogDeniesEnabled, initial.LogDeniesEnabled) || dcl.IsZeroValue(des.LogDeniesEnabled) { + cDes.LogDeniesEnabled = initial.LogDeniesEnabled + } else { + cDes.LogDeniesEnabled = des.LogDeniesEnabled + } + if dcl.BoolCanonicalize(des.MutationEnabled, initial.MutationEnabled) || dcl.IsZeroValue(des.MutationEnabled) { + cDes.MutationEnabled = initial.MutationEnabled + } else { + cDes.MutationEnabled = des.MutationEnabled + } + cDes.Monitoring = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(des.Monitoring, initial.Monitoring, opts...) + if dcl.IsZeroValue(des.AuditIntervalSeconds) || (dcl.IsEmptyValueIndirect(des.AuditIntervalSeconds) && dcl.IsEmptyValueIndirect(initial.AuditIntervalSeconds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.AuditIntervalSeconds = initial.AuditIntervalSeconds + } else { + cDes.AuditIntervalSeconds = des.AuditIntervalSeconds + } + if dcl.IsZeroValue(des.ConstraintViolationLimit) || (dcl.IsEmptyValueIndirect(des.ConstraintViolationLimit) && dcl.IsEmptyValueIndirect(initial.ConstraintViolationLimit)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.ConstraintViolationLimit = initial.ConstraintViolationLimit + } else { + cDes.ConstraintViolationLimit = des.ConstraintViolationLimit + } + cDes.PolicyContent = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(des.PolicyContent, initial.PolicyContent, opts...) + if dcl.IsZeroValue(des.DeploymentConfigs) || (dcl.IsEmptyValueIndirect(des.DeploymentConfigs) && dcl.IsEmptyValueIndirect(initial.DeploymentConfigs)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DeploymentConfigs = initial.DeploymentConfigs + } else { + cDes.DeploymentConfigs = des.DeploymentConfigs + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfig, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) *FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, nw.ExemptableNamespaces) { + nw.ExemptableNamespaces = des.ExemptableNamespaces + } + if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, nw.ReferentialRulesEnabled) { + nw.ReferentialRulesEnabled = des.ReferentialRulesEnabled + } + if dcl.BoolCanonicalize(des.LogDeniesEnabled, nw.LogDeniesEnabled) { + nw.LogDeniesEnabled = des.LogDeniesEnabled + } + if dcl.BoolCanonicalize(des.MutationEnabled, nw.MutationEnabled) { + nw.MutationEnabled = des.MutationEnabled + } + nw.Monitoring = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, des.Monitoring, nw.Monitoring) + nw.PolicyContent = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, des.PolicyContent, nw.PolicyContent) + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfig) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfig) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + + if dcl.IsZeroValue(des.Backends) || (dcl.IsEmptyValueIndirect(des.Backends) && dcl.IsEmptyValueIndirect(initial.Backends)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Backends = initial.Backends + } else { + cDes.Backends = des.Backends + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + + cDes.TemplateLibrary = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(des.TemplateLibrary, initial.TemplateLibrary, opts...) + if dcl.IsZeroValue(des.Bundles) || (dcl.IsEmptyValueIndirect(des.Bundles) && dcl.IsEmptyValueIndirect(initial.Bundles)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Bundles = initial.Bundles + } else { + cDes.Bundles = des.Bundles + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.TemplateLibrary = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, des.TemplateLibrary, nw.TemplateLibrary) + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + + if dcl.IsZeroValue(des.Installation) || (dcl.IsEmptyValueIndirect(des.Installation) && dcl.IsEmptyValueIndirect(initial.Installation)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Installation = initial.Installation + } else { + cDes.Installation = des.Installation + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{} + + if dcl.StringArrayCanonicalize(des.ExemptedNamespaces, initial.ExemptedNamespaces) { + cDes.ExemptedNamespaces = initial.ExemptedNamespaces + } else { + cDes.ExemptedNamespaces = des.ExemptedNamespaces + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.ExemptedNamespaces, nw.ExemptedNamespaces) { + nw.ExemptedNamespaces = des.ExemptedNamespaces + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.ReplicaCount != nil || (initial != nil && initial.ReplicaCount != nil) { + // Check if anything else is set. 
+ if dcl.AnySet(des.ContainerResources, des.PodAffinity, des.PodTolerations) { + des.ReplicaCount = nil + if initial != nil { + initial.ReplicaCount = nil + } + } + } + + if des.ContainerResources != nil || (initial != nil && initial.ContainerResources != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ReplicaCount, des.PodAffinity, des.PodTolerations) { + des.ContainerResources = nil + if initial != nil { + initial.ContainerResources = nil + } + } + } + + if des.PodAffinity != nil || (initial != nil && initial.PodAffinity != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ReplicaCount, des.ContainerResources, des.PodTolerations) { + des.PodAffinity = nil + if initial != nil { + initial.PodAffinity = nil + } + } + } + + if des.PodTolerations != nil || (initial != nil && initial.PodTolerations != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ReplicaCount, des.ContainerResources, des.PodAffinity) { + des.PodTolerations = nil + if initial != nil { + initial.PodTolerations = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{} + + if dcl.IsZeroValue(des.ReplicaCount) || (dcl.IsEmptyValueIndirect(des.ReplicaCount) && dcl.IsEmptyValueIndirect(initial.ReplicaCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ReplicaCount = initial.ReplicaCount + } else { + cDes.ReplicaCount = des.ReplicaCount + } + cDes.ContainerResources = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(des.ContainerResources, initial.ContainerResources, opts...) + if dcl.IsZeroValue(des.PodAffinity) || (dcl.IsEmptyValueIndirect(des.PodAffinity) && dcl.IsEmptyValueIndirect(initial.PodAffinity)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.PodAffinity = initial.PodAffinity + } else { + cDes.PodAffinity = des.PodAffinity + } + cDes.PodTolerations = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(des.PodTolerations, initial.PodTolerations, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.ContainerResources = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, des.ContainerResources, nw.ContainerResources) + nw.PodTolerations = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c, des.PodTolerations, nw.PodTolerations) + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + + cDes.Limits = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(des.Limits, initial.Limits, opts...) + cDes.Requests = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(des.Requests, initial.Requests, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Limits = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, des.Limits, nw.Limits) + nw.Requests = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, des.Requests, nw.Requests) + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + + if dcl.StringCanonicalize(des.Memory, initial.Memory) || dcl.IsZeroValue(des.Memory) { + cDes.Memory = initial.Memory + } else { + cDes.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, initial.Cpu) || dcl.IsZeroValue(des.Cpu) { + cDes.Cpu = initial.Cpu + } else { + cDes.Cpu = des.Cpu + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, 0, len(des)) + for _, d := range des { + cd := 
canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Memory, nw.Memory) { + nw.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, nw.Cpu) { + nw.Cpu = des.Cpu + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + + if dcl.StringCanonicalize(des.Memory, initial.Memory) || dcl.IsZeroValue(des.Memory) { + cDes.Memory = initial.Memory + } else { + cDes.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, initial.Cpu) || dcl.IsZeroValue(des.Cpu) { + cDes.Cpu = initial.Cpu + } else { + cDes.Cpu = des.Cpu + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, 0, len(des)) + for _, d := range des { + cd := 
canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Memory, nw.Memory) { + nw.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, nw.Cpu) { + nw.Cpu = des.Cpu + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{} + + if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { + cDes.Key = initial.Key + } else { + cDes.Key = des.Key + } + if dcl.StringCanonicalize(des.Operator, initial.Operator) || dcl.IsZeroValue(des.Operator) { + cDes.Operator = initial.Operator + } else { + cDes.Operator = des.Operator + } + if dcl.StringCanonicalize(des.Value, initial.Value) || dcl.IsZeroValue(des.Value) { + cDes.Value = initial.Value + } else { + cDes.Value = des.Value + } + if dcl.StringCanonicalize(des.Effect, initial.Effect) || dcl.IsZeroValue(des.Effect) { + cDes.Effect = initial.Effect + } else { + cDes.Effect = des.Effect + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return initial + } + + if len(des) != len(initial) { + 
+ items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Key, nw.Key) { + nw.Key = des.Key + } + if dcl.StringCanonicalize(des.Operator, nw.Operator) { + nw.Operator = des.Operator + } + if dcl.StringCanonicalize(des.Value, nw.Value) { + nw.Value = des.Value + } + if dcl.StringCanonicalize(des.Effect, nw.Effect) { + nw.Effect = des.Effect + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffFeatureMembership(c *Client, desired, actual *FeatureMembership, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Mesh, actual.Mesh, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipMeshNewStyle, EmptyObject: EmptyFeatureMembershipMesh, OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Mesh")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Configmanagement, actual.Configmanagement, dcl.DiffInfo{MergeNestedDiffs: true, ObjectFunction: compareFeatureMembershipConfigmanagementNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagement, OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Configmanagement")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Policycontroller, actual.Policycontroller, dcl.DiffInfo{MergeNestedDiffs: true, ObjectFunction: compareFeatureMembershipPolicycontrollerNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontroller, OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Policycontroller")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Feature, actual.Feature, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Feature")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MembershipLocation, actual.MembershipLocation, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MembershipLocation")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareFeatureMembershipMeshNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipMesh) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipMesh) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipMesh or *FeatureMembershipMesh", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipMesh) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipMesh) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipMesh", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ControlPlane, actual.ControlPlane, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("ControlPlane")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagement) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagement or *FeatureMembershipConfigmanagement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagement) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ConfigSync, actual.ConfigSync, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSync, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigSync")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PolicyController, actual.PolicyController, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementPolicyControllerNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementPolicyController, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyController")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.HierarchyController, actual.HierarchyController, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementHierarchyController, CustomDiff: emptyHNCSameAsAllFalse, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HierarchyController")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSync) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSync) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSync or *FeatureMembershipConfigmanagementConfigSync", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSync) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSync) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSync", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DeploymentOverrides, actual.DeploymentOverrides, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentOverrides")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Git, actual.Git, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncGitNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncGit, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Git")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SourceFormat, actual.SourceFormat, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SourceFormat")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.StopSyncing, actual.StopSyncing, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StopSyncing")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PreventDrift, actual.PreventDrift, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PreventDrift")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MetricsGcpServiceAccountEmail, actual.MetricsGcpServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricsGcpServiceAccountEmail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Oci, actual.Oci, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncOciNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncOci, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Oci")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides or *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DeploymentName, actual.DeploymentName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeploymentNamespace, actual.DeploymentNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentNamespace")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Containers, actual.Containers, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Containers")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers or *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ContainerName, actual.ContainerName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContainerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuRequest, actual.CpuRequest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuRequest")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MemoryRequest, actual.MemoryRequest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MemoryRequest")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CpuLimit, actual.CpuLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MemoryLimit, actual.MemoryLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MemoryLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncGitNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncGit or *FeatureMembershipConfigmanagementConfigSyncGit", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncGit", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SyncRepo, actual.SyncRepo, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncRepo")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncBranch, actual.SyncBranch, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncBranch")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PolicyDir, actual.PolicyDir, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyDir")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncWaitSecs, actual.SyncWaitSecs, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncWaitSecs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncRev, actual.SyncRev, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncRev")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecretType, actual.SecretType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecretType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.HttpsProxy, actual.HttpsProxy, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HttpsProxy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GcpServiceAccountEmail, actual.GcpServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GcpServiceAccountEmail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncOciNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncOci or *FeatureMembershipConfigmanagementConfigSyncOci", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncOci", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SyncRepo, actual.SyncRepo, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("SyncRepo")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PolicyDir, actual.PolicyDir, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("PolicyDir")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncWaitSecs, actual.SyncWaitSecs, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("SyncWaitSecs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.SecretType, actual.SecretType, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("SecretType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GcpServiceAccountEmail, actual.GcpServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("GcpServiceAccountEmail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementPolicyControllerNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementPolicyController) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementPolicyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyController or *FeatureMembershipConfigmanagementPolicyController", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementPolicyController) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementPolicyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyController", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ExemptableNamespaces, actual.ExemptableNamespaces, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExemptableNamespaces")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ReferentialRulesEnabled, actual.ReferentialRulesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReferentialRulesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LogDeniesEnabled, actual.LogDeniesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogDeniesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MutationEnabled, actual.MutationEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MutationEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Monitoring, actual.Monitoring, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipConfigmanagementPolicyControllerMonitoringNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Monitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TemplateLibraryInstalled, actual.TemplateLibraryInstalled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TemplateLibraryInstalled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AuditIntervalSeconds, actual.AuditIntervalSeconds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AuditIntervalSeconds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementPolicyControllerMonitoringNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyControllerMonitoring or *FeatureMembershipConfigmanagementPolicyControllerMonitoring", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyControllerMonitoring", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Backends, actual.Backends, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Backends")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController or *FeatureMembershipConfigmanagementHierarchyController", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnablePodTreeLabels, actual.EnablePodTreeLabels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnablePodTreeLabels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableHierarchicalResourceQuota, actual.EnableHierarchicalResourceQuota, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableHierarchicalResourceQuota")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontroller) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontroller) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontroller or *FeatureMembershipPolicycontroller", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontroller) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontroller) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontroller", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PolicyControllerHubConfig, actual.PolicyControllerHubConfig, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyControllerHubConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfig or *FeatureMembershipPolicycontrollerPolicyControllerHubConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstallSpec, actual.InstallSpec, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstallSpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExemptableNamespaces, actual.ExemptableNamespaces, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExemptableNamespaces")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ReferentialRulesEnabled, actual.ReferentialRulesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReferentialRulesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.LogDeniesEnabled, actual.LogDeniesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogDeniesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MutationEnabled, actual.MutationEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MutationEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Monitoring, actual.Monitoring, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Monitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AuditIntervalSeconds, actual.AuditIntervalSeconds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AuditIntervalSeconds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ConstraintViolationLimit, actual.ConstraintViolationLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConstraintViolationLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PolicyContent, actual.PolicyContent, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyContent")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeploymentConfigs, actual.DeploymentConfigs, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentConfigs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring", a) + } + actual = &actualNotPointer + } + 
+ if ds, err := dcl.Diff(desired.Backends, actual.Backends, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Backends")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TemplateLibrary, actual.TemplateLibrary, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TemplateLibrary")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Bundles, actual.Bundles, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Bundles")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Installation, actual.Installation, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Installation")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ExemptedNamespaces, actual.ExemptedNamespaces, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExemptedNamespaces")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ReplicaCount, actual.ReplicaCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReplicaCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ContainerResources, actual.ContainerResources, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContainerResources")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PodAffinity, actual.PodAffinity, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodTolerations, actual.PodTolerations, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodTolerations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources", a) + } + actual = &actualNotPointer + } + + if 
ds, err := dcl.Diff(desired.Limits, actual.Limits, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Limits")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Requests, actual.Requests, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Requests")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + actualNotPointer, ok := 
a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Memory, actual.Memory, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Memory")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Cpu, actual.Cpu, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cpu")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + return nil, fmt.Errorf("obj %v is not a 
FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Memory, actual.Memory, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Memory")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Cpu, actual.Cpu, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cpu")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } 
+ diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Operator, actual.Operator, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Operator")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Value")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Effect, actual.Effect, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Effect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *FeatureMembership) urlNormalized() *FeatureMembership { + normalized := dcl.Copy(*r).(FeatureMembership) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Feature = dcl.SelfLinkToName(r.Feature) + normalized.Membership = dcl.SelfLinkToName(r.Membership) + normalized.MembershipLocation = dcl.SelfLinkToName(r.MembershipLocation) + return &normalized +} + +func (r *FeatureMembership) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateFeatureMembership" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown 
update name: %s", updateName) +} + +// marshal encodes the FeatureMembership resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *FeatureMembership) marshal(c *Client) ([]byte, error) { + m, err := expandFeatureMembership(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling FeatureMembership: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalFeatureMembership decodes JSON responses into the FeatureMembership resource schema. +func unmarshalFeatureMembership(b []byte, c *Client, res *FeatureMembership) (*FeatureMembership, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapFeatureMembership(m, c, res) +} + +func unmarshalMapFeatureMembership(m map[string]interface{}, c *Client, res *FeatureMembership) (*FeatureMembership, error) { + + flattened := flattenFeatureMembership(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandFeatureMembership expands FeatureMembership into a JSON request object. 
+func expandFeatureMembership(c *Client, f *FeatureMembership) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := expandFeatureMembershipMesh(c, f.Mesh, res); err != nil { + return nil, fmt.Errorf("error expanding Mesh into mesh: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["mesh"] = v + } + if v, err := expandFeatureMembershipConfigmanagement(c, f.Configmanagement, res); err != nil { + return nil, fmt.Errorf("error expanding Configmanagement into configmanagement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["configmanagement"] = v + } + if v, err := expandFeatureMembershipPolicycontroller(c, f.Policycontroller, res); err != nil { + return nil, fmt.Errorf("error expanding Policycontroller into policycontroller: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["policycontroller"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Feature into feature: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["feature"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Membership into membership: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["membership"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding MembershipLocation into membershipLocation: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["membershipLocation"] = v + } + + return m, nil +} + +// flattenFeatureMembership flattens FeatureMembership from a JSON request object into the +// FeatureMembership 
type. +func flattenFeatureMembership(c *Client, i interface{}, res *FeatureMembership) *FeatureMembership { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &FeatureMembership{} + resultRes.Mesh = flattenFeatureMembershipMesh(c, m["mesh"], res) + resultRes.Configmanagement = flattenFeatureMembershipConfigmanagement(c, m["configmanagement"], res) + resultRes.Policycontroller = flattenFeatureMembershipPolicycontroller(c, m["policycontroller"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Feature = dcl.FlattenString(m["feature"]) + resultRes.Membership = dcl.FlattenString(m["membership"]) + resultRes.MembershipLocation = dcl.FlattenString(m["membershipLocation"]) + + return resultRes +} + +// expandFeatureMembershipMeshMap expands the contents of FeatureMembershipMesh into a JSON +// request object. +func expandFeatureMembershipMeshMap(c *Client, f map[string]FeatureMembershipMesh, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipMesh(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipMeshSlice expands the contents of FeatureMembershipMesh into a JSON +// request object. +func expandFeatureMembershipMeshSlice(c *Client, f []FeatureMembershipMesh, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipMesh(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipMeshMap flattens the contents of FeatureMembershipMesh from a JSON +// response object. 
+func flattenFeatureMembershipMeshMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipMesh { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipMesh{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipMesh{} + } + + items := make(map[string]FeatureMembershipMesh) + for k, item := range a { + items[k] = *flattenFeatureMembershipMesh(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipMeshSlice flattens the contents of FeatureMembershipMesh from a JSON +// response object. +func flattenFeatureMembershipMeshSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipMesh { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipMesh{} + } + + if len(a) == 0 { + return []FeatureMembershipMesh{} + } + + items := make([]FeatureMembershipMesh, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipMesh(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipMesh expands an instance of FeatureMembershipMesh into a JSON +// request object. +func expandFeatureMembershipMesh(c *Client, f *FeatureMembershipMesh, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Management; !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + if v := f.ControlPlane; !dcl.IsEmptyValueIndirect(v) { + m["controlPlane"] = v + } + + return m, nil +} + +// flattenFeatureMembershipMesh flattens an instance of FeatureMembershipMesh from a JSON +// response object. 
+func flattenFeatureMembershipMesh(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipMesh { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipMesh{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipMesh + } + r.Management = flattenFeatureMembershipMeshManagementEnum(m["management"]) + r.ControlPlane = flattenFeatureMembershipMeshControlPlaneEnum(m["controlPlane"]) + + return r +} + +// expandFeatureMembershipConfigmanagementMap expands the contents of FeatureMembershipConfigmanagement into a JSON +// request object. +func expandFeatureMembershipConfigmanagementMap(c *Client, f map[string]FeatureMembershipConfigmanagement, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementSlice expands the contents of FeatureMembershipConfigmanagement into a JSON +// request object. +func expandFeatureMembershipConfigmanagementSlice(c *Client, f []FeatureMembershipConfigmanagement, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementMap flattens the contents of FeatureMembershipConfigmanagement from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagement{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagement{} + } + + items := make(map[string]FeatureMembershipConfigmanagement) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementSlice flattens the contents of FeatureMembershipConfigmanagement from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagement { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagement{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagement{} + } + + items := make([]FeatureMembershipConfigmanagement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagement expands an instance of FeatureMembershipConfigmanagement into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagement(c *Client, f *FeatureMembershipConfigmanagement, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureMembershipConfigmanagementConfigSync(c, f.ConfigSync, res); err != nil { + return nil, fmt.Errorf("error expanding ConfigSync into configSync: %w", err) + } else if v != nil { + m["configSync"] = v + } + if v, err := expandFeatureMembershipConfigmanagementPolicyController(c, f.PolicyController, res); err != nil { + return nil, fmt.Errorf("error expanding PolicyController into policyController: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["policyController"] = v + } + if v, err := expandHierarchyControllerConfig(c, f.HierarchyController, res); err != nil { + return nil, fmt.Errorf("error expanding HierarchyController into hierarchyController: %w", err) + } else if v != nil { + m["hierarchyController"] = v + } + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + m["version"] = v + } + if v := f.Management; !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagement flattens an instance of FeatureMembershipConfigmanagement from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagement(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagement + } + r.ConfigSync = flattenFeatureMembershipConfigmanagementConfigSync(c, m["configSync"], res) + r.PolicyController = flattenFeatureMembershipConfigmanagementPolicyController(c, m["policyController"], res) + r.HierarchyController = flattenHierarchyControllerConfig(c, m["hierarchyController"], res) + r.Version = dcl.FlattenString(m["version"]) + r.Management = flattenFeatureMembershipConfigmanagementManagementEnum(m["management"]) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncMap expands the contents of FeatureMembershipConfigmanagementConfigSync into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSync, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSync(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncSlice expands the contents of FeatureMembershipConfigmanagementConfigSync into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSync, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSync(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncMap flattens the contents of FeatureMembershipConfigmanagementConfigSync from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSync { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSync{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSync{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSync) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSync(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncSlice flattens the contents of FeatureMembershipConfigmanagementConfigSync from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSync { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSync{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSync{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSync, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSync(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSync expands an instance of FeatureMembershipConfigmanagementConfigSync into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSync(c *Client, f *FeatureMembershipConfigmanagementConfigSync, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c, f.DeploymentOverrides, res); err != nil { + return nil, fmt.Errorf("error expanding DeploymentOverrides into deploymentOverrides: %w", err) + } else if v != nil { + m["deploymentOverrides"] = v + } + if v, err := expandFeatureMembershipConfigmanagementConfigSyncGit(c, f.Git, res); err != nil { + return nil, fmt.Errorf("error expanding Git into git: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["git"] = v + } + if v := f.SourceFormat; !dcl.IsEmptyValueIndirect(v) { + m["sourceFormat"] = v + } + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.StopSyncing; !dcl.IsEmptyValueIndirect(v) { + m["stopSyncing"] = v + } + if v := f.PreventDrift; !dcl.IsEmptyValueIndirect(v) { + m["preventDrift"] = v + } + if v := f.MetricsGcpServiceAccountEmail; !dcl.IsEmptyValueIndirect(v) { + m["metricsGcpServiceAccountEmail"] = v + } + if v, err := 
expandFeatureMembershipConfigmanagementConfigSyncOci(c, f.Oci, res); err != nil { + return nil, fmt.Errorf("error expanding Oci into oci: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["oci"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSync flattens an instance of FeatureMembershipConfigmanagementConfigSync from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSync(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSync { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSync{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSync + } + r.DeploymentOverrides = flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c, m["deploymentOverrides"], res) + r.Git = flattenFeatureMembershipConfigmanagementConfigSyncGit(c, m["git"], res) + r.SourceFormat = dcl.FlattenString(m["sourceFormat"]) + r.Enabled = dcl.FlattenBool(m["enabled"]) + r.StopSyncing = dcl.FlattenBool(m["stopSyncing"]) + r.PreventDrift = dcl.FlattenBool(m["preventDrift"]) + r.MetricsGcpServiceAccountEmail = dcl.FlattenString(m["metricsGcpServiceAccountEmail"]) + r.Oci = flattenFeatureMembershipConfigmanagementConfigSyncOci(c, m["oci"], res) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides expands an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c *Client, f *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DeploymentName; !dcl.IsEmptyValueIndirect(v) { + m["deploymentName"] = v + } + if v := f.DeploymentNamespace; !dcl.IsEmptyValueIndirect(v) { + m["deploymentNamespace"] = v + } + if v, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c, f.Containers, res); err != nil { + return nil, fmt.Errorf("error expanding Containers into containers: %w", err) + } else if v != nil { + m["containers"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides flattens an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + } + r.DeploymentName = dcl.FlattenString(m["deploymentName"]) + r.DeploymentNamespace = dcl.FlattenString(m["deploymentNamespace"]) + r.Containers = flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c, m["containers"], res) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers expands an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c *Client, f *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ContainerName; !dcl.IsEmptyValueIndirect(v) { + m["containerName"] = v + } + if v := f.CpuRequest; !dcl.IsEmptyValueIndirect(v) { + m["cpuRequest"] = v + } + if v := f.MemoryRequest; !dcl.IsEmptyValueIndirect(v) { + m["memoryRequest"] = v + } + if v := f.CpuLimit; !dcl.IsEmptyValueIndirect(v) { + m["cpuLimit"] = v + } + if v := f.MemoryLimit; !dcl.IsEmptyValueIndirect(v) { + m["memoryLimit"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers flattens an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + } + r.ContainerName = dcl.FlattenString(m["containerName"]) + r.CpuRequest = dcl.FlattenString(m["cpuRequest"]) + r.MemoryRequest = dcl.FlattenString(m["memoryRequest"]) + r.CpuLimit = dcl.FlattenString(m["cpuLimit"]) + r.MemoryLimit = dcl.FlattenString(m["memoryLimit"]) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncGitMap expands the contents of FeatureMembershipConfigmanagementConfigSyncGit into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncGitMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncGit, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncGit(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncGitSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncGit into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncGitSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncGit, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncGit(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncGitMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncGit from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncGitMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncGit { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncGit{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncGit{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncGit) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncGit(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncGitSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncGit from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncGitSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncGit { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncGit{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncGit{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncGit, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncGit(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncGit expands an instance of FeatureMembershipConfigmanagementConfigSyncGit into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncGit(c *Client, f *FeatureMembershipConfigmanagementConfigSyncGit, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SyncRepo; !dcl.IsEmptyValueIndirect(v) { + m["syncRepo"] = v + } + if v := f.SyncBranch; !dcl.IsEmptyValueIndirect(v) { + m["syncBranch"] = v + } + if v := f.PolicyDir; !dcl.IsEmptyValueIndirect(v) { + m["policyDir"] = v + } + if v := f.SyncWaitSecs; !dcl.IsEmptyValueIndirect(v) { + m["syncWaitSecs"] = v + } + if v := f.SyncRev; !dcl.IsEmptyValueIndirect(v) { + m["syncRev"] = v + } + if v := f.SecretType; !dcl.IsEmptyValueIndirect(v) { + m["secretType"] = v + } + if v := f.HttpsProxy; !dcl.IsEmptyValueIndirect(v) { + m["httpsProxy"] = v + } + if v := f.GcpServiceAccountEmail; !dcl.IsEmptyValueIndirect(v) { + m["gcpServiceAccountEmail"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncGit flattens an instance of FeatureMembershipConfigmanagementConfigSyncGit from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncGit(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncGit { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncGit{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncGit + } + r.SyncRepo = dcl.FlattenString(m["syncRepo"]) + r.SyncBranch = dcl.FlattenString(m["syncBranch"]) + r.PolicyDir = dcl.FlattenString(m["policyDir"]) + r.SyncWaitSecs = dcl.FlattenString(m["syncWaitSecs"]) + r.SyncRev = dcl.FlattenString(m["syncRev"]) + r.SecretType = dcl.FlattenString(m["secretType"]) + r.HttpsProxy = dcl.FlattenString(m["httpsProxy"]) + r.GcpServiceAccountEmail = dcl.FlattenString(m["gcpServiceAccountEmail"]) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncOciMap expands the contents of FeatureMembershipConfigmanagementConfigSyncOci into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncOciMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncOci, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncOci(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncOciSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncOci into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncOciSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncOci, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncOci(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncOciMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncOci from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncOciMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncOci { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncOci{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncOci{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncOci) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncOci(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncOciSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncOci from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncOciSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncOci { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncOci{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncOci{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncOci, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncOci(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncOci expands an instance of FeatureMembershipConfigmanagementConfigSyncOci into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncOci(c *Client, f *FeatureMembershipConfigmanagementConfigSyncOci, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SyncRepo; !dcl.IsEmptyValueIndirect(v) { + m["syncRepo"] = v + } + if v := f.PolicyDir; !dcl.IsEmptyValueIndirect(v) { + m["policyDir"] = v + } + if v := f.SyncWaitSecs; !dcl.IsEmptyValueIndirect(v) { + m["syncWaitSecs"] = v + } + if v := f.SecretType; !dcl.IsEmptyValueIndirect(v) { + m["secretType"] = v + } + if v := f.GcpServiceAccountEmail; !dcl.IsEmptyValueIndirect(v) { + m["gcpServiceAccountEmail"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncOci flattens an instance of FeatureMembershipConfigmanagementConfigSyncOci from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncOci(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncOci { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncOci{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncOci + } + r.SyncRepo = dcl.FlattenString(m["syncRepo"]) + r.PolicyDir = dcl.FlattenString(m["policyDir"]) + r.SyncWaitSecs = dcl.FlattenString(m["syncWaitSecs"]) + r.SecretType = dcl.FlattenString(m["secretType"]) + r.GcpServiceAccountEmail = dcl.FlattenString(m["gcpServiceAccountEmail"]) + + return r +} + +// expandFeatureMembershipConfigmanagementPolicyControllerMap expands the contents of FeatureMembershipConfigmanagementPolicyController into a JSON +// request object. +func expandFeatureMembershipConfigmanagementPolicyControllerMap(c *Client, f map[string]FeatureMembershipConfigmanagementPolicyController, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementPolicyController(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementPolicyControllerSlice expands the contents of FeatureMembershipConfigmanagementPolicyController into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementPolicyControllerSlice(c *Client, f []FeatureMembershipConfigmanagementPolicyController, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementPolicyController(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMap flattens the contents of FeatureMembershipConfigmanagementPolicyController from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementPolicyControllerMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementPolicyController { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementPolicyController{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementPolicyController{} + } + + items := make(map[string]FeatureMembershipConfigmanagementPolicyController) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementPolicyController(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerSlice flattens the contents of FeatureMembershipConfigmanagementPolicyController from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementPolicyControllerSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementPolicyController { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementPolicyController{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementPolicyController{} + } + + items := make([]FeatureMembershipConfigmanagementPolicyController, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementPolicyController(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementPolicyController expands an instance of FeatureMembershipConfigmanagementPolicyController into a JSON +// request object. +func expandFeatureMembershipConfigmanagementPolicyController(c *Client, f *FeatureMembershipConfigmanagementPolicyController, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.ExemptableNamespaces; v != nil { + m["exemptableNamespaces"] = v + } + if v := f.ReferentialRulesEnabled; !dcl.IsEmptyValueIndirect(v) { + m["referentialRulesEnabled"] = v + } + if v := f.LogDeniesEnabled; !dcl.IsEmptyValueIndirect(v) { + m["logDeniesEnabled"] = v + } + if v := f.MutationEnabled; !dcl.IsEmptyValueIndirect(v) { + m["mutationEnabled"] = v + } + if v, err := expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, f.Monitoring, res); err != nil { + return nil, fmt.Errorf("error expanding Monitoring into monitoring: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["monitoring"] = v + } + if v := f.TemplateLibraryInstalled; !dcl.IsEmptyValueIndirect(v) { + m["templateLibraryInstalled"] = v + } + if v := f.AuditIntervalSeconds; !dcl.IsEmptyValueIndirect(v) { + m["auditIntervalSeconds"] = v + } + + 
return m, nil +} + +// flattenFeatureMembershipConfigmanagementPolicyController flattens an instance of FeatureMembershipConfigmanagementPolicyController from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementPolicyController(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementPolicyController { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementPolicyController{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementPolicyController + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + r.ExemptableNamespaces = dcl.FlattenStringSlice(m["exemptableNamespaces"]) + r.ReferentialRulesEnabled = dcl.FlattenBool(m["referentialRulesEnabled"]) + r.LogDeniesEnabled = dcl.FlattenBool(m["logDeniesEnabled"]) + r.MutationEnabled = dcl.FlattenBool(m["mutationEnabled"]) + r.Monitoring = flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, m["monitoring"], res) + r.TemplateLibraryInstalled = dcl.FlattenBool(m["templateLibraryInstalled"]) + r.AuditIntervalSeconds = dcl.FlattenString(m["auditIntervalSeconds"]) + + return r +} + +// expandFeatureMembershipConfigmanagementPolicyControllerMonitoringMap expands the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementPolicyControllerMonitoringMap(c *Client, f map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice expands the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring into a JSON +// request object. +func expandFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(c *Client, f []FeatureMembershipConfigmanagementPolicyControllerMonitoring, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringMap flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + } + + items := make(map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementPolicyControllerMonitoring { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + } + + items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoring, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementPolicyControllerMonitoring expands an instance of FeatureMembershipConfigmanagementPolicyControllerMonitoring into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c *Client, f *FeatureMembershipConfigmanagementPolicyControllerMonitoring, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Backends; v != nil { + m["backends"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring flattens an instance of FeatureMembershipConfigmanagementPolicyControllerMonitoring from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementPolicyControllerMonitoring { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring + } + r.Backends = flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumSlice(c, m["backends"], res) + + return r +} + +// expandFeatureMembershipConfigmanagementHierarchyControllerMap expands the contents of FeatureMembershipConfigmanagementHierarchyController into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementHierarchyControllerMap(c *Client, f map[string]FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementHierarchyController(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementHierarchyControllerSlice expands the contents of FeatureMembershipConfigmanagementHierarchyController into a JSON +// request object. +func expandFeatureMembershipConfigmanagementHierarchyControllerSlice(c *Client, f []FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementHierarchyController(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementHierarchyControllerMap flattens the contents of FeatureMembershipConfigmanagementHierarchyController from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementHierarchyControllerMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementHierarchyController { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementHierarchyController{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementHierarchyController{} + } + + items := make(map[string]FeatureMembershipConfigmanagementHierarchyController) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementHierarchyController(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementHierarchyControllerSlice flattens the contents of FeatureMembershipConfigmanagementHierarchyController from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementHierarchyControllerSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementHierarchyController { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementHierarchyController{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementHierarchyController{} + } + + items := make([]FeatureMembershipConfigmanagementHierarchyController, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementHierarchyController(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementHierarchyController expands an instance of FeatureMembershipConfigmanagementHierarchyController into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementHierarchyController(c *Client, f *FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; v != nil { + m["enabled"] = v + } + if v := f.EnablePodTreeLabels; v != nil { + m["enablePodTreeLabels"] = v + } + if v := f.EnableHierarchicalResourceQuota; v != nil { + m["enableHierarchicalResourceQuota"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementHierarchyController flattens an instance of FeatureMembershipConfigmanagementHierarchyController from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementHierarchyController(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementHierarchyController { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementHierarchyController{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementHierarchyController + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + r.EnablePodTreeLabels = dcl.FlattenBool(m["enablePodTreeLabels"]) + r.EnableHierarchicalResourceQuota = dcl.FlattenBool(m["enableHierarchicalResourceQuota"]) + + return r +} + +// expandFeatureMembershipPolicycontrollerMap expands the contents of FeatureMembershipPolicycontroller into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerMap(c *Client, f map[string]FeatureMembershipPolicycontroller, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontroller(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerSlice expands the contents of FeatureMembershipPolicycontroller into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerSlice(c *Client, f []FeatureMembershipPolicycontroller, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontroller(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerMap flattens the contents of FeatureMembershipPolicycontroller from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontroller { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontroller{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontroller{} + } + + items := make(map[string]FeatureMembershipPolicycontroller) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontroller(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerSlice flattens the contents of FeatureMembershipPolicycontroller from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontroller { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontroller{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontroller{} + } + + items := make([]FeatureMembershipPolicycontroller, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontroller(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontroller expands an instance of FeatureMembershipPolicycontroller into a JSON +// request object. +func expandFeatureMembershipPolicycontroller(c *Client, f *FeatureMembershipPolicycontroller, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + m["version"] = v + } + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, f.PolicyControllerHubConfig, res); err != nil { + return nil, fmt.Errorf("error expanding PolicyControllerHubConfig into policyControllerHubConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["policyControllerHubConfig"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontroller flattens an instance of FeatureMembershipPolicycontroller from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontroller(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontroller { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontroller{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontroller + } + r.Version = dcl.FlattenString(m["version"]) + r.PolicyControllerHubConfig = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, m["policyControllerHubConfig"], res) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfig, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfig into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfig, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.InstallSpec; !dcl.IsEmptyValueIndirect(v) { + m["installSpec"] = v + } + if v := f.ExemptableNamespaces; v != nil { + m["exemptableNamespaces"] = v + } + if v := f.ReferentialRulesEnabled; !dcl.IsEmptyValueIndirect(v) { + m["referentialRulesEnabled"] = v + } + if v := f.LogDeniesEnabled; !dcl.IsEmptyValueIndirect(v) { + m["logDeniesEnabled"] = v + } + if v := f.MutationEnabled; !dcl.IsEmptyValueIndirect(v) { + m["mutationEnabled"] = v + } + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, f.Monitoring, res); err != nil { + return nil, fmt.Errorf("error expanding Monitoring into monitoring: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["monitoring"] = v + } + if v := f.AuditIntervalSeconds; !dcl.IsEmptyValueIndirect(v) { + m["auditIntervalSeconds"] = v + } + if v := 
f.ConstraintViolationLimit; !dcl.IsEmptyValueIndirect(v) { + m["constraintViolationLimit"] = v + } + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, f.PolicyContent, res); err != nil { + return nil, fmt.Errorf("error expanding PolicyContent into policyContent: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["policyContent"] = v + } + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c, f.DeploymentConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding DeploymentConfigs into deploymentConfigs: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["deploymentConfigs"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfig from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + } + r.InstallSpec = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(m["installSpec"]) + r.ExemptableNamespaces = dcl.FlattenStringSlice(m["exemptableNamespaces"]) + r.ReferentialRulesEnabled = dcl.FlattenBool(m["referentialRulesEnabled"]) + r.LogDeniesEnabled = dcl.FlattenBool(m["logDeniesEnabled"]) + r.MutationEnabled = dcl.FlattenBool(m["mutationEnabled"]) + r.Monitoring = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, m["monitoring"], res) + r.AuditIntervalSeconds = dcl.FlattenInteger(m["auditIntervalSeconds"]) + r.ConstraintViolationLimit = 
dcl.FlattenInteger(m["constraintViolationLimit"]) + r.PolicyContent = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, m["policyContent"], res) + r.DeploymentConfigs = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c, m["deploymentConfigs"], res) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Backends; v != nil { + m["backends"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + } + r.Backends = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumSlice(c, m["backends"], res) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, f.TemplateLibrary, res); err != nil { + return nil, fmt.Errorf("error expanding TemplateLibrary into templateLibrary: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["templateLibrary"] = v + } + if v, err := alsoExpandEmptyBundlesInMap(c, f.Bundles, res); err != nil { + return nil, fmt.Errorf("error expanding Bundles into bundles: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["bundles"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent flattens an instance of 
FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + } + r.TemplateLibrary = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, m["templateLibrary"], res) + r.Bundles = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(c, m["bundles"], res) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Installation; !dcl.IsEmptyValueIndirect(v) { + m["installation"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + } + r.Installation = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(m["installation"]) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ExemptedNamespaces; v != nil { + m["exemptedNamespaces"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + } + r.ExemptedNamespaces = dcl.FlattenStringSlice(m["exemptedNamespaces"]) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ReplicaCount; !dcl.IsEmptyValueIndirect(v) { + m["replicaCount"] = v + } + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, f.ContainerResources, res); err != nil { + return nil, fmt.Errorf("error expanding ContainerResources into containerResources: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["containerResources"] = v + } + if v := f.PodAffinity; !dcl.IsEmptyValueIndirect(v) { + m["podAffinity"] = v + } + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c, f.PodTolerations, res); err != nil { + return nil, 
fmt.Errorf("error expanding PodTolerations into podTolerations: %w", err) + } else if v != nil { + m["podTolerations"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + } + r.ReplicaCount = dcl.FlattenInteger(m["replicaCount"]) + r.ContainerResources = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, m["containerResources"], res) + r.PodAffinity = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(m["podAffinity"]) + r.PodTolerations = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c, m["podTolerations"], res) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, f.Limits, res); err != nil { + return nil, fmt.Errorf("error expanding Limits into limits: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["limits"] = v + } + if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, f.Requests, res); err != nil { + return nil, fmt.Errorf("error expanding Requests into requests: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["requests"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } + r.Limits = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, m["limits"], res) + r.Requests = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, m["requests"], res) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Memory; !dcl.IsEmptyValueIndirect(v) { + m["memory"] = v + } + if v := f.Cpu; !dcl.IsEmptyValueIndirect(v) { + m["cpu"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } + r.Memory = dcl.FlattenString(m["memory"]) + r.Cpu = dcl.FlattenString(m["cpu"]) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice flattens the 
contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Memory; !dcl.IsEmptyValueIndirect(v) { + m["memory"] = v + } + if v := f.Cpu; !dcl.IsEmptyValueIndirect(v) { + m["cpu"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } + r.Memory = dcl.FlattenString(m["memory"]) + r.Cpu = dcl.FlattenString(m["cpu"]) + + return r +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations into a JSON +// request object. +func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations into a JSON +// request object. 
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Key; !dcl.IsEmptyValueIndirect(v) { + m["key"] = v + } + if v := f.Operator; !dcl.IsEmptyValueIndirect(v) { + m["operator"] = v + } + if v := f.Value; !dcl.IsEmptyValueIndirect(v) { + m["value"] = v + } + if v := f.Effect; !dcl.IsEmptyValueIndirect(v) { + m["effect"] = v + } + + return m, nil +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations from a JSON +// response object. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + } + r.Key = dcl.FlattenString(m["key"]) + r.Operator = dcl.FlattenString(m["operator"]) + r.Value = dcl.FlattenString(m["value"]) + r.Effect = dcl.FlattenString(m["effect"]) + + return r +} + +// flattenFeatureMembershipMeshManagementEnumMap flattens the contents of FeatureMembershipMeshManagementEnum from a JSON +// response object. 
+func flattenFeatureMembershipMeshManagementEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipMeshManagementEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipMeshManagementEnum{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipMeshManagementEnum{} + } + + items := make(map[string]FeatureMembershipMeshManagementEnum) + for k, item := range a { + items[k] = *flattenFeatureMembershipMeshManagementEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureMembershipMeshManagementEnumSlice flattens the contents of FeatureMembershipMeshManagementEnum from a JSON +// response object. +func flattenFeatureMembershipMeshManagementEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipMeshManagementEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipMeshManagementEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipMeshManagementEnum{} + } + + items := make([]FeatureMembershipMeshManagementEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipMeshManagementEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipMeshManagementEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipMeshManagementEnum with the same value as that string. +func flattenFeatureMembershipMeshManagementEnum(i interface{}) *FeatureMembershipMeshManagementEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipMeshManagementEnumRef(s) +} + +// flattenFeatureMembershipMeshControlPlaneEnumMap flattens the contents of FeatureMembershipMeshControlPlaneEnum from a JSON +// response object. 
+func flattenFeatureMembershipMeshControlPlaneEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipMeshControlPlaneEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipMeshControlPlaneEnum{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipMeshControlPlaneEnum{} + } + + items := make(map[string]FeatureMembershipMeshControlPlaneEnum) + for k, item := range a { + items[k] = *flattenFeatureMembershipMeshControlPlaneEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureMembershipMeshControlPlaneEnumSlice flattens the contents of FeatureMembershipMeshControlPlaneEnum from a JSON +// response object. +func flattenFeatureMembershipMeshControlPlaneEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipMeshControlPlaneEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipMeshControlPlaneEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipMeshControlPlaneEnum{} + } + + items := make([]FeatureMembershipMeshControlPlaneEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipMeshControlPlaneEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipMeshControlPlaneEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipMeshControlPlaneEnum with the same value as that string. +func flattenFeatureMembershipMeshControlPlaneEnum(i interface{}) *FeatureMembershipMeshControlPlaneEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipMeshControlPlaneEnumRef(s) +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumMap flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{} + } + + items := make(map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumSlice flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{} + } + + items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum with the same value as that string. 
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(i interface{}) *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(s) +} + +// flattenFeatureMembershipConfigmanagementManagementEnumMap flattens the contents of FeatureMembershipConfigmanagementManagementEnum from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementManagementEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementManagementEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementManagementEnum{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementManagementEnum{} + } + + items := make(map[string]FeatureMembershipConfigmanagementManagementEnum) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementManagementEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementManagementEnumSlice flattens the contents of FeatureMembershipConfigmanagementManagementEnum from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementManagementEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementManagementEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementManagementEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementManagementEnum{} + } + + items := make([]FeatureMembershipConfigmanagementManagementEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementManagementEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementManagementEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipConfigmanagementManagementEnum with the same value as that string. +func flattenFeatureMembershipConfigmanagementManagementEnum(i interface{}) *FeatureMembershipConfigmanagementManagementEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipConfigmanagementManagementEnumRef(s) +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum from a JSON +// response object. 
func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum {
	// Non-map or empty input flattens to an empty (non-nil) map.
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
	}

	if len(a) == 0 {
		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
	}

	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum)
	for k, item := range a {
		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(item.(interface{}))
	}

	return items
}

// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum from a JSON
// response object.
func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum {
	// Non-slice or empty input flattens to an empty (non-nil) slice.
	a, ok := i.([]interface{})
	if !ok {
		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
	}

	if len(a) == 0 {
		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
	}

	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(item.(interface{})))
	}

	return items
}

// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum asserts that an interface is a string, and returns a
// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum with the same value as that string.
func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum {
	s, ok := i.(string)
	if !ok {
		// Non-string input flattens to nil.
		return nil
	}

	return FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(s)
}

// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum from a JSON
// response object.
func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum {
	// Non-map or empty input flattens to an empty (non-nil) map.
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{}
	}

	if len(a) == 0 {
		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{}
	}

	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum)
	for k, item := range a {
		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(item.(interface{}))
	}

	return items
}

// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum from a JSON
// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum with the same value as that string. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(s) +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum with the same value as that string. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(s) +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{} + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum) + for k, item := range a { + items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum from a JSON +// response object. 
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{} + } + + if len(a) == 0 { + return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{} + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(item.(interface{}))) + } + + return items +} + +// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum asserts that an interface is a string, and returns a +// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum with the same value as that string. +func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
func (r *FeatureMembership) matcher(c *Client) func([]byte) bool {
	return func(b []byte) bool {
		cr, err := unmarshalFeatureMembership(b, c, r)
		if err != nil {
			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
			return false
		}
		// Compare URL-normalized forms so that identity fields match regardless of formatting.
		nr := r.urlNormalized()
		ncr := cr.urlNormalized()
		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)

		// For each identity field: both nil is a match, exactly one nil is a mismatch,
		// otherwise compare the dereferenced values.
		if nr.Project == nil && ncr.Project == nil {
			c.Config.Logger.Info("Both Project fields null - considering equal.")
		} else if nr.Project == nil || ncr.Project == nil {
			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
			return false
		} else if *nr.Project != *ncr.Project {
			return false
		}
		if nr.Location == nil && ncr.Location == nil {
			c.Config.Logger.Info("Both Location fields null - considering equal.")
		} else if nr.Location == nil || ncr.Location == nil {
			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
			return false
		} else if *nr.Location != *ncr.Location {
			return false
		}
		if nr.Feature == nil && ncr.Feature == nil {
			c.Config.Logger.Info("Both Feature fields null - considering equal.")
		} else if nr.Feature == nil || ncr.Feature == nil {
			c.Config.Logger.Info("Only one Feature field is null - considering unequal.")
			return false
		} else if *nr.Feature != *ncr.Feature {
			return false
		}
		return true
	}
}

// featureMembershipDiff describes a single required change to reach the desired state.
type featureMembershipDiff struct {
	// The diff should include one or the other of RequiresRecreate or UpdateOp.
	RequiresRecreate bool
	UpdateOp         featureMembershipApiOperation
	FieldName        string // used for error logging
}

// convertFieldDiffsToFeatureMembershipDiffs groups field diffs by the API operation that
// resolves them and returns one featureMembershipDiff per operation (or per recreate).
func convertFieldDiffsToFeatureMembershipDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]featureMembershipDiff, error) {
	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
	// Map each operation name to the field diffs associated with it.
	for _, fd := range fds {
		for _, ro := range fd.ResultingOperation {
			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
				fieldDiffs = append(fieldDiffs, fd)
				opNamesToFieldDiffs[ro] = fieldDiffs
			} else {
				// First diff seen for this operation — log why the operation is required.
				config.Logger.Infof("%s required due to diff: %v", ro, fd)
				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
			}
		}
	}
	var diffs []featureMembershipDiff
	// For each operation name, create a featureMembershipDiff which contains the operation.
	for opName, fieldDiffs := range opNamesToFieldDiffs {
		// Use the first field diff's field name for logging required recreate error.
		diff := featureMembershipDiff{FieldName: fieldDiffs[0].FieldName}
		if opName == "Recreate" {
			diff.RequiresRecreate = true
		} else {
			apiOp, err := convertOpNameToFeatureMembershipApiOperation(opName, fieldDiffs, opts...)
			if err != nil {
				return diffs, err
			}
			diff.UpdateOp = apiOp
		}
		diffs = append(diffs, diff)
	}
	return diffs, nil
}

// convertOpNameToFeatureMembershipApiOperation maps an operation name produced by the diff
// engine to its concrete API operation; unknown names are an error.
func convertOpNameToFeatureMembershipApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (featureMembershipApiOperation, error) {
	switch opName {

	case "updateFeatureMembershipUpdateFeatureMembershipOperation":
		return &updateFeatureMembershipUpdateFeatureMembershipOperation{FieldDiffs: fieldDiffs}, nil

	default:
		return nil, fmt.Errorf("no such operation with name: %v", opName)
	}
}

// extractFeatureMembershipFields runs the nested extract pass over each sub-object,
// materializing a temporary empty struct for nil fields and writing it back only if
// extraction produced a non-empty result.
func extractFeatureMembershipFields(r *FeatureMembership) error {
	vMesh := r.Mesh
	if vMesh == nil {
		// note: explicitly not the empty object.
		vMesh = &FeatureMembershipMesh{}
	}
	if err := extractFeatureMembershipMeshFields(r, vMesh); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vMesh) {
		r.Mesh = vMesh
	}
	vConfigmanagement := r.Configmanagement
	if vConfigmanagement == nil {
		// note: explicitly not the empty object.
		vConfigmanagement = &FeatureMembershipConfigmanagement{}
	}
	if err := extractFeatureMembershipConfigmanagementFields(r, vConfigmanagement); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vConfigmanagement) {
		r.Configmanagement = vConfigmanagement
	}
	vPolicycontroller := r.Policycontroller
	if vPolicycontroller == nil {
		// note: explicitly not the empty object.
		vPolicycontroller = &FeatureMembershipPolicycontroller{}
	}
	if err := extractFeatureMembershipPolicycontrollerFields(r, vPolicycontroller); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vPolicycontroller) {
		r.Policycontroller = vPolicycontroller
	}
	return nil
}

// extractFeatureMembershipMeshFields has no nested fields to extract.
func extractFeatureMembershipMeshFields(r *FeatureMembership, o *FeatureMembershipMesh) error {
	return nil
}

// extractFeatureMembershipConfigmanagementFields recurses into ConfigSync,
// PolicyController, and HierarchyController.
func extractFeatureMembershipConfigmanagementFields(r *FeatureMembership, o *FeatureMembershipConfigmanagement) error {
	vConfigSync := o.ConfigSync
	if vConfigSync == nil {
		// note: explicitly not the empty object.
		vConfigSync = &FeatureMembershipConfigmanagementConfigSync{}
	}
	if err := extractFeatureMembershipConfigmanagementConfigSyncFields(r, vConfigSync); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vConfigSync) {
		o.ConfigSync = vConfigSync
	}
	vPolicyController := o.PolicyController
	if vPolicyController == nil {
		// note: explicitly not the empty object.
		vPolicyController = &FeatureMembershipConfigmanagementPolicyController{}
	}
	if err := extractFeatureMembershipConfigmanagementPolicyControllerFields(r, vPolicyController); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vPolicyController) {
		o.PolicyController = vPolicyController
	}
	vHierarchyController := o.HierarchyController
	if vHierarchyController == nil {
		// note: explicitly not the empty object.
		vHierarchyController = &FeatureMembershipConfigmanagementHierarchyController{}
	}
	if err := extractFeatureMembershipConfigmanagementHierarchyControllerFields(r, vHierarchyController); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vHierarchyController) {
		o.HierarchyController = vHierarchyController
	}
	return nil
}

// extractFeatureMembershipConfigmanagementConfigSyncFields recurses into Git and Oci.
func extractFeatureMembershipConfigmanagementConfigSyncFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSync) error {
	vGit := o.Git
	if vGit == nil {
		// note: explicitly not the empty object.
		vGit = &FeatureMembershipConfigmanagementConfigSyncGit{}
	}
	if err := extractFeatureMembershipConfigmanagementConfigSyncGitFields(r, vGit); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vGit) {
		o.Git = vGit
	}
	vOci := o.Oci
	if vOci == nil {
		// note: explicitly not the empty object.
		vOci = &FeatureMembershipConfigmanagementConfigSyncOci{}
	}
	if err := extractFeatureMembershipConfigmanagementConfigSyncOciFields(r, vOci); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vOci) {
		o.Oci = vOci
	}
	return nil
}

// The following leaf extract functions have no nested fields to process.
func extractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) error {
	return nil
}
func extractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) error {
	return nil
}
func extractFeatureMembershipConfigmanagementConfigSyncGitFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncGit) error {
	return nil
}
func extractFeatureMembershipConfigmanagementConfigSyncOciFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncOci) error {
	return nil
}

// extractFeatureMembershipConfigmanagementPolicyControllerFields recurses into Monitoring.
func extractFeatureMembershipConfigmanagementPolicyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyController) error {
	vMonitoring := o.Monitoring
	if vMonitoring == nil {
		// note: explicitly not the empty object.
		vMonitoring = &FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
	}
	if err := extractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r, vMonitoring); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vMonitoring) {
		o.Monitoring = vMonitoring
	}
	return nil
}
func extractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyControllerMonitoring) error {
	return nil
}
func extractFeatureMembershipConfigmanagementHierarchyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementHierarchyController) error {
	return nil
}

// extractFeatureMembershipPolicycontrollerFields recurses into PolicyControllerHubConfig.
func extractFeatureMembershipPolicycontrollerFields(r *FeatureMembership, o *FeatureMembershipPolicycontroller) error {
	vPolicyControllerHubConfig := o.PolicyControllerHubConfig
	if vPolicyControllerHubConfig == nil {
		// note: explicitly not the empty object.
		vPolicyControllerHubConfig = &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
	}
	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r, vPolicyControllerHubConfig); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vPolicyControllerHubConfig) {
		o.PolicyControllerHubConfig = vPolicyControllerHubConfig
	}
	return nil
}
func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) error {
	vMonitoring := o.Monitoring
	if vMonitoring == nil {
		// note: explicitly not the empty object.
+ vMonitoring = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r, vMonitoring); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoring) { + o.Monitoring = vMonitoring + } + vPolicyContent := o.PolicyContent + if vPolicyContent == nil { + // note: explicitly not the empty object. + vPolicyContent = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r, vPolicyContent); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicyContent) { + o.PolicyContent = vPolicyContent + } + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) error { + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) error { + vTemplateLibrary := o.TemplateLibrary + if vTemplateLibrary == nil { + // note: explicitly not the empty object. 
+ vTemplateLibrary = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r, vTemplateLibrary); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTemplateLibrary) { + o.TemplateLibrary = vTemplateLibrary + } + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) error { + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) error { + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) error { + vContainerResources := o.ContainerResources + if vContainerResources == nil { + // note: explicitly not the empty object. + vContainerResources = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r, vContainerResources); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vContainerResources) { + o.ContainerResources = vContainerResources + } + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) error { + vLimits := o.Limits + if vLimits == nil { + // note: explicitly not the empty object. 
+ vLimits = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r, vLimits); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLimits) { + o.Limits = vLimits + } + vRequests := o.Requests + if vRequests == nil { + // note: explicitly not the empty object. + vRequests = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r, vRequests); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRequests) { + o.Requests = vRequests + } + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) error { + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) error { + return nil +} +func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) error { + return nil +} + +func postReadExtractFeatureMembershipFields(r *FeatureMembership) error { + vMesh := r.Mesh + if vMesh == nil { + // note: explicitly not the empty object. 
+ vMesh = &FeatureMembershipMesh{} + } + if err := postReadExtractFeatureMembershipMeshFields(r, vMesh); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMesh) { + r.Mesh = vMesh + } + vConfigmanagement := r.Configmanagement + if vConfigmanagement == nil { + // note: explicitly not the empty object. + vConfigmanagement = &FeatureMembershipConfigmanagement{} + } + if err := postReadExtractFeatureMembershipConfigmanagementFields(r, vConfigmanagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigmanagement) { + r.Configmanagement = vConfigmanagement + } + vPolicycontroller := r.Policycontroller + if vPolicycontroller == nil { + // note: explicitly not the empty object. + vPolicycontroller = &FeatureMembershipPolicycontroller{} + } + if err := postReadExtractFeatureMembershipPolicycontrollerFields(r, vPolicycontroller); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicycontroller) { + r.Policycontroller = vPolicycontroller + } + return nil +} +func postReadExtractFeatureMembershipMeshFields(r *FeatureMembership, o *FeatureMembershipMesh) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementFields(r *FeatureMembership, o *FeatureMembershipConfigmanagement) error { + vConfigSync := o.ConfigSync + if vConfigSync == nil { + // note: explicitly not the empty object. + vConfigSync = &FeatureMembershipConfigmanagementConfigSync{} + } + if err := extractFeatureMembershipConfigmanagementConfigSyncFields(r, vConfigSync); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigSync) { + o.ConfigSync = vConfigSync + } + vPolicyController := o.PolicyController + if vPolicyController == nil { + // note: explicitly not the empty object. 
+ vPolicyController = &FeatureMembershipConfigmanagementPolicyController{} + } + if err := extractFeatureMembershipConfigmanagementPolicyControllerFields(r, vPolicyController); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicyController) { + o.PolicyController = vPolicyController + } + vHierarchyController := o.HierarchyController + if vHierarchyController == nil { + // note: explicitly not the empty object. + vHierarchyController = &FeatureMembershipConfigmanagementHierarchyController{} + } + if err := extractFeatureMembershipConfigmanagementHierarchyControllerFields(r, vHierarchyController); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHierarchyController) { + o.HierarchyController = vHierarchyController + } + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSync) error { + vGit := o.Git + if vGit == nil { + // note: explicitly not the empty object. + vGit = &FeatureMembershipConfigmanagementConfigSyncGit{} + } + if err := extractFeatureMembershipConfigmanagementConfigSyncGitFields(r, vGit); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGit) { + o.Git = vGit + } + vOci := o.Oci + if vOci == nil { + // note: explicitly not the empty object. 
+ vOci = &FeatureMembershipConfigmanagementConfigSyncOci{} + } + if err := extractFeatureMembershipConfigmanagementConfigSyncOciFields(r, vOci); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vOci) { + o.Oci = vOci + } + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncGitFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncGit) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncOciFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncOci) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementPolicyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyController) error { + vMonitoring := o.Monitoring + if vMonitoring == nil { + // note: explicitly not the empty object. 
+ vMonitoring = &FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + } + if err := extractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r, vMonitoring); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoring) { + o.Monitoring = vMonitoring + } + return nil +} +func postReadExtractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyControllerMonitoring) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementHierarchyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementHierarchyController) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerFields(r *FeatureMembership, o *FeatureMembershipPolicycontroller) error { + vPolicyControllerHubConfig := o.PolicyControllerHubConfig + if vPolicyControllerHubConfig == nil { + // note: explicitly not the empty object. + vPolicyControllerHubConfig = &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r, vPolicyControllerHubConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicyControllerHubConfig) { + o.PolicyControllerHubConfig = vPolicyControllerHubConfig + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) error { + vMonitoring := o.Monitoring + if vMonitoring == nil { + // note: explicitly not the empty object. 
+ vMonitoring = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r, vMonitoring); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoring) { + o.Monitoring = vMonitoring + } + vPolicyContent := o.PolicyContent + if vPolicyContent == nil { + // note: explicitly not the empty object. + vPolicyContent = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r, vPolicyContent); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicyContent) { + o.PolicyContent = vPolicyContent + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) error { + vTemplateLibrary := o.TemplateLibrary + if vTemplateLibrary == nil { + // note: explicitly not the empty object. 
+ vTemplateLibrary = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r, vTemplateLibrary); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTemplateLibrary) { + o.TemplateLibrary = vTemplateLibrary + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) error { + vContainerResources := o.ContainerResources + if vContainerResources == nil { + // note: explicitly not the empty object. + vContainerResources = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r, vContainerResources); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vContainerResources) { + o.ContainerResources = vContainerResources + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) error { + vLimits := o.Limits + if vLimits == nil { + // note: explicitly not the empty object. 
+ vLimits = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r, vLimits); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLimits) { + o.Limits = vLimits + } + vRequests := o.Requests + if vRequests == nil { + // note: explicitly not the empty object. + vRequests = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r, vRequests); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRequests) { + o.Requests = vRequests + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl b/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl new file mode 100644 index 000000000000..7bba499538b1 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl @@ -0,0 +1,363 @@ +package gkehub + +import ( + "bytes" + "context" + 
"encoding/json" + "errors" + "fmt" + "io" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" + "google.golang.org/api/googleapi" +) + +// getMembershipSpecs returns a map of membership specs taken from the get response of the feature membership's feature object. +func getMembershipSpecs(ctx context.Context, r *FeatureMembership, c *Client) (map[string]any, error) { + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := io.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + membershipSpecs, ok := m["membershipSpecs"].(map[string]any) + if !ok { + return map[string]any{}, nil + } + return membershipSpecs, nil +} + +// Return the value if it exists, default otherwise +func valueOrDefaultString(val *string, def string) string { + if dcl.ValueOrEmptyString(val) == "" { + return def + } + return dcl.ValueOrEmptyString(val) +} + +// Return the full key for a given FeatureMembership's entry in the membershipSpecs field. +func membershipSpecKey(r *FeatureMembership) string { + params := map[string]any{ + "project": dcl.ValueOrEmptyString(r.Project), + "location": valueOrDefaultString(r.MembershipLocation, "global"), + "membership": dcl.ValueOrEmptyString(r.Membership), + } + + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}membership{{ "}}" }}", params) +} + +// Find and return the key and value in membershipSpecs matching the given membership. 
+func findMembershipSpec(membership string, membershipLocation string, membershipSpecs map[string]any) (string, map[string]any, error) { + for key, value := range membershipSpecs { + if strings.HasSuffix(key, fmt.Sprintf("%s/memberships/%s", membershipLocation, membership)) { + spec, ok := value.(map[string]any) + if !ok { + return "", nil, errors.New("membership spec was not of map type") + } + return key, spec, nil + } + } + return "", nil, &googleapi.Error{ + Code: 404, + Message: "feature membership not found in feature membership specs", + } +} + +func sendFeatureUpdate(ctx context.Context, req map[string]any, c *Client, u string) error { + c.Config.Logger.Infof("Created update: %#v", req) + body, err := json.Marshal(req) + if err != nil { + return err + } + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": "membershipSpecs"}) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(ctx, c.Config, "https://gkehub.googleapis.com/v1beta/", "GET") + + if err != nil { + return err + } + + return nil +} + +func (op *createFeatureMembershipOperation) do(ctx context.Context, r *FeatureMembership, c *Client) error { + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + + nr := r.urlNormalized() + membershipSpecs, err := getMembershipSpecs(ctx, nr, c) + if err != nil { + return err + } + m, err := expandFeatureMembership(c, nr) + if err != nil { + return err + } + if err := dcl.PutMapEntry(membershipSpecs, []string{membershipSpecKey(nr)}, m); err != nil { + return err + } + req := map[string]any{ + "membershipSpecs": membershipSpecs, + } + return sendFeatureUpdate(ctx, req, c, u) +} + +// GetFeatureMembership returns a feature 
membership object retrieved from the membershipSpecs field of a feature. +func (c *Client) GetFeatureMembership(ctx context.Context, r *FeatureMembership) (*FeatureMembership, error) { + nr := r.urlNormalized() + membershipSpecs, err := getMembershipSpecs(ctx, nr, c) + if err != nil { + return nil, err + } + _, spec, err := findMembershipSpec(dcl.ValueOrEmptyString(nr.Membership), valueOrDefaultString(nr.MembershipLocation, "global"), membershipSpecs) + if err != nil { + return nil, err + } + result, err := unmarshalMapFeatureMembership(spec, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Feature = r.Feature + result.Membership = r.Membership + + c.Config.Logger.Infof("Retrieved raw result state: %v", result) + c.Config.Logger.Infof("Canonicalizing with specified state: %v", r) + result, err = canonicalizeFeatureMembershipNewState(c, result, r) + if err != nil { + return nil, err + } + c.Config.Logger.Infof("Created result state: %v", result) + + return result, nil +} + +// HasNext always returns false because a feature membership list never has a next page. +func (l *FeatureMembershipList) HasNext() bool { + return false +} + +// Next returns nil because it will never be called. +func (l *FeatureMembershipList) Next(_ context.Context, _ *Client) error { + return nil +} + +// ListFeatureMembership returns a list of feature memberships retrieved from the membershipSpecs field of a feature. 
+func (c *Client) ListFeatureMembership(ctx context.Context, project, location, feature string) (*FeatureMembershipList, error) { + r := &FeatureMembership{ + Project: &project, + Location: &location, + Feature: &feature, + } + membershipSpecs, err := getMembershipSpecs(ctx, r, c) + if err != nil { + return nil, err + } + var list *FeatureMembershipList + for key, spec := range membershipSpecs { + m, ok := spec.(map[string]any) + if !ok { + return nil, errors.New("membership spec was not of map type") + } + ri, err := unmarshalMapFeatureMembership(m, c, r) + if err != nil { + return nil, err + } + ri.Project = r.Project + ri.Location = r.Location + ri.Feature = r.Feature + ri.Membership = dcl.SelfLinkToName(&key) + list.Items = append(list.Items, ri) + } + return list, nil +} + +func (op *updateFeatureMembershipUpdateFeatureMembershipOperation) do(ctx context.Context, r *FeatureMembership, c *Client) error { + nr := r.urlNormalized() + u, err := r.updateURL(c.Config.BasePath, "UpdateFeatureMembership") + if err != nil { + return err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + + membershipSpecs, err := getMembershipSpecs(ctx, r, c) + if err != nil { + return err + } + key, _, err := findMembershipSpec(dcl.ValueOrEmptyString(nr.Membership), valueOrDefaultString(nr.MembershipLocation, "global"), membershipSpecs) + if err != nil { + return err + } + m, err := expandFeatureMembership(c, r) + if err != nil { + return err + } + if err := dcl.PutMapEntry(membershipSpecs, []string{key}, m); err != nil { + return err + } + req := map[string]any{ + "membershipSpecs": membershipSpecs, + } + return sendFeatureUpdate(ctx, req, c, u) +} + +func (op *deleteFeatureMembershipOperation) do(ctx context.Context, r *FeatureMembership, c *Client) error { + nr := r.urlNormalized() + u, err := nr.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + + membershipSpecs, err := getMembershipSpecs(ctx, nr, c) + if err != 
nil { + return err + } + key, _, err := findMembershipSpec(dcl.ValueOrEmptyString(nr.Membership), valueOrDefaultString(nr.MembershipLocation, "global"), membershipSpecs) + if err != nil { + return err + } + membershipSpecs[key] = map[string]any{} + req := map[string]any{ + "membershipSpecs": membershipSpecs, + } + return sendFeatureUpdate(ctx, req, c, u) +} + +// CompareFeatureMembershipConfigmanagementHierarchyControllerNewStyle exists only for unit-testing the diff library. +func CompareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(d, a any, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + return compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(d, a, fn) +} + +// This function behaves the same way as the generated diff function, except that it explicitly +// checks for emptiness as well. +func emptyHNCSameAsAllFalse(d, a any) bool { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + fmt.Printf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController or *FeatureMembershipConfigmanagementHierarchyController\n", d) + return false + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + fmt.Printf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController\n", a) + return false + } + actual = &actualNotPointer + } + + if actual == nil && desired == nil { + return true + } + if actual == nil || desired == nil { + return false + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, dcl.FieldName{FieldName: "Enabled"}); len(ds) != 0 || err != nil { + if err != nil { + fmt.Print(err) + return false + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.EnablePodTreeLabels, actual.EnablePodTreeLabels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, dcl.FieldName{FieldName: "EnablePodTreeLabels"}); len(ds) != 0 || err != nil { + if err != nil { + fmt.Print(err) + return false + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableHierarchicalResourceQuota, actual.EnableHierarchicalResourceQuota, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, dcl.FieldName{FieldName: "EnableHierarchicalResourceQuota"}); len(ds) != 0 || err != nil { + if err != nil { + fmt.Print(err) + return false + } + diffs = append(diffs, ds...) + } + + if len(diffs) > 0 { + return false + } + + if desired.Empty() != actual.Empty() { + return false + } + return true +} + +func flattenHierarchyControllerConfig(c *Client, i any, v *FeatureMembership) *FeatureMembershipConfigmanagementHierarchyController { + m, ok := i.(map[string]any) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementHierarchyController{} + + // Compared to the generated code, we removed the part where we skip flattening the API response + // if the return value is empty (i.e. HNC = {}). This is because the Hub API returns the same + // empty object for both {} (empty config) and {fieldA: false, fieldB: false, fieldC: false}. We + // always flatten the response into the latter form i.e. explicitly stating false values, so that + // it fits more easily into the declarative pattern and avoids a permadiff bug. 
+ r.Enabled = dcl.FlattenBool(m["enabled"]) + r.EnablePodTreeLabels = dcl.FlattenBool(m["enablePodTreeLabels"]) + r.EnableHierarchicalResourceQuota = dcl.FlattenBool(m["enableHierarchicalResourceQuota"]) + + return r +} + +func expandHierarchyControllerConfig(c *Client, f *FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) (map[string]any, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]any) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.EnablePodTreeLabels; !dcl.IsEmptyValueIndirect(v) { + m["enablePodTreeLabels"] = v + } + if v := f.EnableHierarchicalResourceQuota; !dcl.IsEmptyValueIndirect(v) { + m["enableHierarchicalResourceQuota"] = v + } + + return m, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/poco_utils.go b/mmv1/third_party/terraform/services/gkehub/poco_utils.go new file mode 100644 index 000000000000..e8b89b89cb3a --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/poco_utils.go @@ -0,0 +1,27 @@ +package gkehub + +func alsoExpandEmptyBundlesInMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]any, error) { + if len(f) == 0 { + return nil, nil + } + + items := make(map[string]any) + for k, v := range f { + i, err := alsoExpandEmptyBundles(c, &v, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + return items, nil +} + +func alsoExpandEmptyBundles(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]any, error) { + m := make(map[string]any) + if v := f.ExemptedNamespaces; v != nil { + m["exemptedNamespaces"] = v + } + return m, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/gkehub/provider_dcl_client_creation.go new file mode 100644 
index 000000000000..5decc37e9671 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package gkehub + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLGkeHubClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.GKEHubBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl new file mode 100644 index 000000000000..b551a9291458 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl @@ -0,0 +1,1886 @@ +package gkehub + +import ( + "context" + "fmt" + "log" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" +{{- end }} + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceGkeHubFeatureMembership() *schema.Resource { + return &schema.Resource{ + Create: resourceGkeHubFeatureMembershipCreate, + Read: resourceGkeHubFeatureMembershipRead, + Update: resourceGkeHubFeatureMembershipUpdate, + Delete: resourceGkeHubFeatureMembershipDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGkeHubFeatureMembershipImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, +{{- if ne $.TargetVersionName "ga" }} + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), +{{- end }} + + Schema: map[string]*schema.Schema{ + "feature": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the feature", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location of the feature", + }, + + "membership": { + Type: schema.TypeString, + Required: 
true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the membership", + }, + + "configmanagement": { + Type: schema.TypeList, + Optional: true, + Description: "Config Management-specific spec.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementSchema(), + }, + + "membership_location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The location of the membership", + }, + + "mesh": { + Type: schema.TypeList, + Optional: true, + Description: "Manage Mesh Features", + MaxItems: 1, + Elem: GkeHubFeatureMembershipMeshSchema(), + }, + + "policycontroller": { + Type: schema.TypeList, + Optional: true, + Description: "Policy Controller-specific spec.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project of the feature", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_sync": { + Type: schema.TypeList, + Optional: true, + Description: "Config Sync configuration for the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncSchema(), + }, + + "hierarchy_controller": { + Type: schema.TypeList, + Optional: true, + Description: "Hierarchy Controller configuration for the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementHierarchyControllerSchema(), + }, + + "management": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.", + }, + + "policy_controller": { + Type: schema.TypeList, + Optional: 
true, + Description: "**DEPRECATED** Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema(), + }, + + "version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Version of ACM to install. Defaults to the latest version.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment_overrides": { + Type: schema.TypeList, + Optional: true, + Description: "The override configurations for the Config Sync Deployments.", + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSchema(), + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. 
If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", + }, + + "git": { + Type: schema.TypeList, + Optional: true, + Description: "", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncGitSchema(), + }, + + "metrics_gcp_service_account_email": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring.", + }, + + "oci": { + Type: schema.TypeList, + Optional: true, + Description: "", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncOciSchema(), + }, + + "prevent_drift": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", + }, + + "source_format": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", + }, + + "stop_syncing": { + Type: schema.TypeBool, + Optional: true, + Description: "Set to true to stop syncing configs for a single cluster. 
Default: false.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containers": { + Type: schema.TypeList, + Optional: true, + Description: "The override configurations for the containers in the Deployment.", + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSchema(), + }, + + "deployment_name": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the Deployment.", + }, + + "deployment_namespace": { + Type: schema.TypeString, + Optional: true, + Description: "The namespace of the Deployment.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_name": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the container.", + }, + + "cpu_limit": { + Type: schema.TypeString, + Optional: true, + Description: "The CPU limit of the container.", + }, + + "cpu_request": { + Type: schema.TypeString, + Optional: true, + Description: "The CPU request of the container.", + }, + + "memory_limit": { + Type: schema.TypeString, + Optional: true, + Description: "The memory limit of the container.", + }, + + "memory_request": { + Type: schema.TypeString, + Optional: true, + Description: "The memory request of the container.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncGitSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gcp_service_account_email": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The GCP Service Account Email used for auth when secretType is gcpServiceAccount.", + }, + + "https_proxy": { + Type: schema.TypeString, + Optional: true, + Description: "URL 
for the HTTPS proxy to be used when communicating with the Git repo.", + }, + + "policy_dir": { + Type: schema.TypeString, + Optional: true, + Description: "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.", + }, + + "secret_type": { + Type: schema.TypeString, + Optional: true, + Description: "Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount or none. The validation of this is case-sensitive.", + }, + + "sync_branch": { + Type: schema.TypeString, + Optional: true, + Description: "The branch of the repository to sync from. Default: master.", + }, + + "sync_repo": { + Type: schema.TypeString, + Optional: true, + Description: "The URL of the Git repository to use as the source of truth.", + }, + + "sync_rev": { + Type: schema.TypeString, + Optional: true, + Description: "Git revision (tag or hash) to check out. Default HEAD.", + }, + + "sync_wait_secs": { + Type: schema.TypeString, + Optional: true, + Description: "Period in seconds between consecutive syncs. Default: 15.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncOciSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gcp_service_account_email": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The GCP Service Account Email used for auth when secret_type is gcpserviceaccount. ", + }, + + "policy_dir": { + Type: schema.TypeString, + Optional: true, + Description: "The absolute path of the directory that contains the local resources. Default: the root directory of the image.", + }, + + "secret_type": { + Type: schema.TypeString, + Optional: true, + Description: "Type of secret configured for access to the OCI Image. Must be one of gcenode, gcpserviceaccount or none. 
The validation of this is case-sensitive.", + }, + + "sync_repo": { + Type: schema.TypeString, + Optional: true, + Description: "The OCI image repository URL for the package to sync from. e.g. LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME.", + }, + + "sync_wait_secs": { + Type: schema.TypeString, + Optional: true, + Description: "Period in seconds(int64 format) between consecutive syncs. Default: 15.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementHierarchyControllerSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_hierarchical_resource_quota": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether hierarchical resource quota is enabled in this cluster.", + }, + + "enable_pod_tree_labels": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether pod tree labels are enabled in this cluster.", + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "**DEPRECATED** Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_interval_seconds": { + Type: schema.TypeString, + Optional: true, + Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the installation of Policy Controller. If false, the rest of PolicyController fields take no effect.", + }, + + "exemptable_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: "The set of namespaces that are excluded from Policy Controller checks. 
Namespaces do not need to currently exist on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "log_denies_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Logs all denies and dry run failures.", + }, + + "monitoring": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema(), + }, + + "mutation_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable or disable mutation in policy controller. If true, mutation CRDs, webhook and controller deployment will be deployed to the cluster.", + }, + + "referential_rules_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", + }, + + "template_library_installed": { + Type: schema.TypeBool, + Optional: true, + Description: "Installs the default template library along with Policy Controller.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backends": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: " Specifies the list of backends Policy Controller will export to. 
Specifying an empty value `[]` disables metrics export.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func GkeHubFeatureMembershipMeshSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "control_plane": { + Type: schema.TypeString, + Optional: true, + Description: "**DEPRECATED** Whether to automatically manage Service Mesh control planes. Possible values: CONTROL_PLANE_MANAGEMENT_UNSPECIFIED, AUTOMATIC, MANUAL", + Deprecated: "Deprecated in favor of the `management` field", + }, + + "management": { + Type: schema.TypeString, + Optional: true, + Description: "Whether to automatically manage Service Mesh. Possible values: MANAGEMENT_UNSPECIFIED, MANAGEMENT_AUTOMATIC, MANAGEMENT_MANUAL", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy_controller_hub_config": { + Type: schema.TypeList, + Required: true, + Description: "Policy Controller configuration for the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigSchema(), + }, + + "version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Version of Policy Controller to install. Defaults to the latest version.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_interval_seconds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", + }, + + "constraint_violation_limit": { + Type: schema.TypeInt, + Optional: true, + Description: "The maximum number of audit violations to be stored in a constraint. 
If not set, the internal default of 20 will be used.", + }, + + "deployment_configs": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: "Map of deployment configs to deployments (\"admission\", \"audit\", \"mutation\").", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema(), + Set: schema.HashResource(GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema()), + }, + + "exemptable_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: "The set of namespaces that are excluded from Policy Controller checks. Namespaces do not need to currently exist on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "install_spec": { + Type: schema.TypeString, + Optional: true, + Description: "Configures the mode of the Policy Controller installation. Possible values: INSTALL_SPEC_UNSPECIFIED, INSTALL_SPEC_NOT_INSTALLED, INSTALL_SPEC_ENABLED, INSTALL_SPEC_SUSPENDED, INSTALL_SPEC_DETACHED", + }, + + "log_denies_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Logs all denies and dry run failures.", + }, + + "monitoring": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. 
Default: [\"cloudmonitoring\", \"prometheus\"]", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSchema(), + }, + + "mutation_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the ability to mutate resources using Policy Controller.", + }, + + "policy_content": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the desired policy content on the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSchema(), + }, + + "referential_rules_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "container_resources": { + Type: schema.TypeList, + Optional: true, + Description: "Container resource requirements.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSchema(), + }, + + "pod_affinity": { + Type: schema.TypeString, + Optional: true, + Description: "Pod affinity configuration. 
Possible values: AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY", + }, + + "pod_tolerations": { + Type: schema.TypeList, + Optional: true, + Description: "Pod tolerations of node taints.", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSchema(), + }, + + "replica_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Pod replica count.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limits": { + Type: schema.TypeList, + Optional: true, + Description: "Limits describes the maximum amount of compute resources allowed for use by the running container.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSchema(), + }, + + "requests": { + Type: schema.TypeList, + Optional: true, + Description: "Requests describes the amount of compute resources reserved for the container by the kube-scheduler.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSchema(), + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeString, + Optional: true, + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + + "memory": { + Type: schema.TypeString, + Optional: true, + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + 
Type: schema.TypeString, + Optional: true, + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + + "memory": { + Type: schema.TypeString, + Optional: true, + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint effect.", + }, + + "key": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint key (not necessarily unique).", + }, + + "operator": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint operator.", + }, + + "value": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint value.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backends": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: " Specifies the list of backends Policy Controller will export to. Specifying an empty value `[]` disables metrics export.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bundles": { + Type: schema.TypeSet, + Optional: true, + Description: "map of bundle name to BundleInstallSpec. 
The bundle name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData` annotation on a constraint.", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema(), + Set: schema.HashResource(GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema()), + }, + + "template_library": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configures the installation of the Template Library.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySchema(), + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bundle_name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "exempted_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: "The set of namespaces to be exempted from the bundle.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "installation": { + Type: schema.TypeString, + Optional: true, + Description: "Configures the manner in which the template library is installed on the cluster. 
Possible values: INSTALLATION_UNSPECIFIED, NOT_INSTALLED, ALL", + }, + }, + } +} + +func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + lockName, err := tpgresource.ReplaceVarsForId(d, config, "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/membershipId/{{ "{{" }}membership{{ "}}" }}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not 
format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFeatureMembership(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating FeatureMembership: %s", err) + } + + log.Printf("[DEBUG] Finished creating FeatureMembership %q: %#v", d.Id(), res) + + return resourceGkeHubFeatureMembershipRead(d, meta) +} + +func resourceGkeHubFeatureMembershipRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := 
client.GetFeatureMembership(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("GkeHubFeatureMembership %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("feature", res.Feature); err != nil { + return fmt.Errorf("error setting feature in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("membership", res.Membership); err != nil { + return fmt.Errorf("error setting membership in state: %s", err) + } + if err = d.Set("configmanagement", flattenGkeHubFeatureMembershipConfigmanagement(res.Configmanagement)); err != nil { + return fmt.Errorf("error setting configmanagement in state: %s", err) + } + if err = d.Set("membership_location", res.MembershipLocation); err != nil { + return fmt.Errorf("error setting membership_location in state: %s", err) + } + if err = d.Set("mesh", flattenGkeHubFeatureMembershipMesh(res.Mesh)); err != nil { + return fmt.Errorf("error setting mesh in state: %s", err) + } + if err = d.Set("policycontroller", flattenGkeHubFeatureMembershipPolicycontroller(res.Policycontroller)); err != nil { + return fmt.Errorf("error setting policycontroller in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + + return nil +} +func resourceGkeHubFeatureMembershipUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: 
dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + lockName, err := tpgresource.ReplaceVarsForId(d, config, "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + directive := dcl.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFeatureMembership(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating FeatureMembership: %s", err) + } + + log.Printf("[DEBUG] Finished creating FeatureMembership %q: %#v", d.Id(), res) + + return resourceGkeHubFeatureMembershipRead(d, meta) +} + +func resourceGkeHubFeatureMembershipDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + lockName, err := tpgresource.ReplaceVarsForId(d, config, "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + log.Printf("[DEBUG] Deleting FeatureMembership %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); 
err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteFeatureMembership(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting FeatureMembership: %s", err) + } + + log.Printf("[DEBUG] Finished deleting FeatureMembership %q", d.Id()) + return nil +} + +func resourceGkeHubFeatureMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/features/(?P[^/]+)/membershipId/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/membershipId/{{ "{{" }}membership{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandGkeHubFeatureMembershipConfigmanagement(o interface{}) *FeatureMembershipConfigmanagement { + if o == nil { + return EmptyFeatureMembershipConfigmanagement + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipConfigmanagement + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipConfigmanagement{ + ConfigSync: expandGkeHubFeatureMembershipConfigmanagementConfigSync(obj["config_sync"]), + HierarchyController: expandGkeHubFeatureMembershipConfigmanagementHierarchyController(obj["hierarchy_controller"]), + Management: FeatureMembershipConfigmanagementManagementEnumRef(obj["management"].(string)), + PolicyController: 
expandGkeHubFeatureMembershipConfigmanagementPolicyController(obj["policy_controller"]), + Version: dcl.StringOrNil(obj["version"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagement(obj *FeatureMembershipConfigmanagement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "config_sync": flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj.ConfigSync), + "hierarchy_controller": flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj.HierarchyController), + "management": obj.Management, + "policy_controller": flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj.PolicyController), + "version": obj.Version, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSync(o interface{}) *FeatureMembershipConfigmanagementConfigSync { + if o == nil { + return EmptyFeatureMembershipConfigmanagementConfigSync + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipConfigmanagementConfigSync + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipConfigmanagementConfigSync{ + DeploymentOverrides: expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(obj["deployment_overrides"]), + Enabled: dcl.Bool(obj["enabled"].(bool)), + Git: expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj["git"]), + MetricsGcpServiceAccountEmail: dcl.String(obj["metrics_gcp_service_account_email"].(string)), + Oci: expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj["oci"]), + PreventDrift: dcl.Bool(obj["prevent_drift"].(bool)), + SourceFormat: dcl.String(obj["source_format"].(string)), + StopSyncing: dcl.Bool(obj["stop_syncing"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj *FeatureMembershipConfigmanagementConfigSync) interface{} { + if obj == nil || obj.Empty() { + return nil + } + 
transformed := map[string]interface{}{ + "deployment_overrides": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(obj.DeploymentOverrides), + "enabled": obj.Enabled, + "git": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj.Git), + "metrics_gcp_service_account_email": obj.MetricsGcpServiceAccountEmail, + "oci": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj.Oci), + "prevent_drift": obj.PreventDrift, + "source_format": obj.SourceFormat, + "stop_syncing": obj.StopSyncing, + } + + return []interface{}{transformed} + +} +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(o interface{}) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if o == nil { + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(objs)) + for _, item := range objs { + i := expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(item) + items = append(items, *i) + } + + return items +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(o interface{}) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if o == nil { + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + } + + obj := o.(map[string]interface{}) + return &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{ + Containers: expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(obj["containers"]), + DeploymentName: dcl.String(obj["deployment_name"].(string)), + DeploymentNamespace: dcl.String(obj["deployment_namespace"].(string)), + } +} + +func 
flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(objs []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(&item) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(obj *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "containers": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(obj.Containers), + "deployment_name": obj.DeploymentName, + "deployment_namespace": obj.DeploymentNamespace, + } + + return transformed + +} +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(o interface{}) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if o == nil { + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(objs)) + for _, item := range objs { + i := expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(item) + items = append(items, *i) + } + + return items +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(o interface{}) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if o == nil { + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + } + + obj := o.(map[string]interface{}) + 
return &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{ + ContainerName: dcl.String(obj["container_name"].(string)), + CpuLimit: dcl.String(obj["cpu_limit"].(string)), + CpuRequest: dcl.String(obj["cpu_request"].(string)), + MemoryLimit: dcl.String(obj["memory_limit"].(string)), + MemoryRequest: dcl.String(obj["memory_request"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(objs []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(&item) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(obj *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "container_name": obj.ContainerName, + "cpu_limit": obj.CpuLimit, + "cpu_request": obj.CpuRequest, + "memory_limit": obj.MemoryLimit, + "memory_request": obj.MemoryRequest, + } + + return transformed + +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(o interface{}) *FeatureMembershipConfigmanagementConfigSyncGit { + if o == nil { + return EmptyFeatureMembershipConfigmanagementConfigSyncGit + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipConfigmanagementConfigSyncGit + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipConfigmanagementConfigSyncGit{ + GcpServiceAccountEmail: dcl.String(obj["gcp_service_account_email"].(string)), + HttpsProxy: dcl.String(obj["https_proxy"].(string)), + PolicyDir: dcl.String(obj["policy_dir"].(string)), + SecretType: dcl.String(obj["secret_type"].(string)), 
+ SyncBranch: dcl.String(obj["sync_branch"].(string)), + SyncRepo: dcl.String(obj["sync_repo"].(string)), + SyncRev: dcl.String(obj["sync_rev"].(string)), + SyncWaitSecs: dcl.String(obj["sync_wait_secs"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj *FeatureMembershipConfigmanagementConfigSyncGit) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gcp_service_account_email": obj.GcpServiceAccountEmail, + "https_proxy": obj.HttpsProxy, + "policy_dir": obj.PolicyDir, + "secret_type": obj.SecretType, + "sync_branch": obj.SyncBranch, + "sync_repo": obj.SyncRepo, + "sync_rev": obj.SyncRev, + "sync_wait_secs": obj.SyncWaitSecs, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(o interface{}) *FeatureMembershipConfigmanagementConfigSyncOci { + if o == nil { + return EmptyFeatureMembershipConfigmanagementConfigSyncOci + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipConfigmanagementConfigSyncOci + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipConfigmanagementConfigSyncOci{ + GcpServiceAccountEmail: dcl.String(obj["gcp_service_account_email"].(string)), + PolicyDir: dcl.String(obj["policy_dir"].(string)), + SecretType: dcl.String(obj["secret_type"].(string)), + SyncRepo: dcl.String(obj["sync_repo"].(string)), + SyncWaitSecs: dcl.String(obj["sync_wait_secs"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj *FeatureMembershipConfigmanagementConfigSyncOci) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gcp_service_account_email": obj.GcpServiceAccountEmail, + "policy_dir": obj.PolicyDir, + "secret_type": obj.SecretType, + "sync_repo": obj.SyncRepo, + "sync_wait_secs": obj.SyncWaitSecs, + } + + return []interface{}{transformed} + 
+} + +func expandGkeHubFeatureMembershipConfigmanagementHierarchyController(o interface{}) *FeatureMembershipConfigmanagementHierarchyController { + if o == nil { + return EmptyFeatureMembershipConfigmanagementHierarchyController + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipConfigmanagementHierarchyController + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipConfigmanagementHierarchyController{ + EnableHierarchicalResourceQuota: dcl.Bool(obj["enable_hierarchical_resource_quota"].(bool)), + EnablePodTreeLabels: dcl.Bool(obj["enable_pod_tree_labels"].(bool)), + Enabled: dcl.Bool(obj["enabled"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj *FeatureMembershipConfigmanagementHierarchyController) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_hierarchical_resource_quota": obj.EnableHierarchicalResourceQuota, + "enable_pod_tree_labels": obj.EnablePodTreeLabels, + "enabled": obj.Enabled, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementPolicyController(o interface{}) *FeatureMembershipConfigmanagementPolicyController { + if o == nil { + return EmptyFeatureMembershipConfigmanagementPolicyController + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipConfigmanagementPolicyController + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipConfigmanagementPolicyController{ + AuditIntervalSeconds: dcl.String(obj["audit_interval_seconds"].(string)), + Enabled: dcl.Bool(obj["enabled"].(bool)), + ExemptableNamespaces: dcl.ExpandStringArray(obj["exemptable_namespaces"]), + LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), + Monitoring: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj["monitoring"]), + MutationEnabled: 
dcl.Bool(obj["mutation_enabled"].(bool)), + ReferentialRulesEnabled: dcl.Bool(obj["referential_rules_enabled"].(bool)), + TemplateLibraryInstalled: dcl.Bool(obj["template_library_installed"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *FeatureMembershipConfigmanagementPolicyController) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "audit_interval_seconds": obj.AuditIntervalSeconds, + "enabled": obj.Enabled, + "exemptable_namespaces": obj.ExemptableNamespaces, + "log_denies_enabled": obj.LogDeniesEnabled, + "monitoring": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj.Monitoring), + "mutation_enabled": obj.MutationEnabled, + "referential_rules_enabled": obj.ReferentialRulesEnabled, + "template_library_installed": obj.TemplateLibraryInstalled, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(o interface{}) *FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipConfigmanagementPolicyControllerMonitoring{ + Backends: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj["backends"]), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj *FeatureMembershipConfigmanagementPolicyControllerMonitoring) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "backends": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj.Backends), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipMesh(o interface{}) *FeatureMembershipMesh { + if o == nil { + return 
EmptyFeatureMembershipMesh + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipMesh + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipMesh{ + ControlPlane: FeatureMembershipMeshControlPlaneEnumRef(obj["control_plane"].(string)), + Management: FeatureMembershipMeshManagementEnumRef(obj["management"].(string)), + } +} + +func flattenGkeHubFeatureMembershipMesh(obj *FeatureMembershipMesh) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "control_plane": obj.ControlPlane, + "management": obj.Management, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontroller(o interface{}) *FeatureMembershipPolicycontroller { + if o == nil { + return EmptyFeatureMembershipPolicycontroller + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipPolicycontroller + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipPolicycontroller{ + PolicyControllerHubConfig: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj["policy_controller_hub_config"]), + Version: dcl.StringOrNil(obj["version"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontroller(obj *FeatureMembershipPolicycontroller) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "policy_controller_hub_config": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj.PolicyControllerHubConfig), + "version": obj.Version, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + if o == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || 
objArr[0] == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{ + AuditIntervalSeconds: dcl.Int64OrNil(int64(obj["audit_interval_seconds"].(int))), + ConstraintViolationLimit: dcl.Int64(int64(obj["constraint_violation_limit"].(int))), + DeploymentConfigs: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(obj["deployment_configs"]), + ExemptableNamespaces: dcl.ExpandStringArray(obj["exemptable_namespaces"]), + InstallSpec: FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(obj["install_spec"].(string)), + LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), + Monitoring: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj["monitoring"]), + MutationEnabled: dcl.Bool(obj["mutation_enabled"].(bool)), + PolicyContent: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj["policy_content"]), + ReferentialRulesEnabled: dcl.Bool(obj["referential_rules_enabled"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "audit_interval_seconds": obj.AuditIntervalSeconds, + "constraint_violation_limit": obj.ConstraintViolationLimit, + "deployment_configs": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(obj.DeploymentConfigs), + "exemptable_namespaces": obj.ExemptableNamespaces, + "install_spec": obj.InstallSpec, + "log_denies_enabled": obj.LogDeniesEnabled, + "monitoring": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj.Monitoring), + "mutation_enabled": obj.MutationEnabled, + "policy_content": 
flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj.PolicyContent), + "referential_rules_enabled": obj.ReferentialRulesEnabled, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(o interface{}) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if o == nil { + return nil + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(item) + if item != nil { + items[item.(map[string]interface{})["component_name"].(string)] = *i + } + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{ + ContainerResources: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj["container_resources"]), + PodAffinity: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(obj["pod_affinity"].(string)), + PodTolerations: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(obj["pod_tolerations"]), + ReplicaCount: dcl.Int64(int64(obj["replica_count"].(int))), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(objs map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []interface{} { + if 
objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(&item, name) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, name string) interface{} { + if obj == nil { + return nil + } + transformed := map[string]interface{}{ + "container_resources": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj.ContainerResources), + "pod_affinity": obj.PodAffinity, + "pod_tolerations": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(obj.PodTolerations), + "replica_count": obj.ReplicaCount, + } + + transformed["component_name"] = name + + return transformed + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if o == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{ + Limits: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj["limits"]), + Requests: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj["requests"]), + } +} + +func 
flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "limits": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj.Limits), + "requests": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj.Requests), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if o == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{ + Cpu: dcl.String(obj["cpu"].(string)), + Memory: dcl.String(obj["memory"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu": obj.Cpu, + "memory": obj.Memory, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(o 
interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if o == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{ + Cpu: dcl.String(obj["cpu"].(string)), + Memory: dcl.String(obj["memory"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu": obj.Cpu, + "memory": obj.Memory, + } + + return []interface{}{transformed} + +} +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(o interface{}) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if o == nil { + return make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(objs)) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(item) + items = append(items, *i) + } + + return items +} + +func 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if o == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + } + + obj := o.(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{ + Effect: dcl.String(obj["effect"].(string)), + Key: dcl.String(obj["key"].(string)), + Operator: dcl.String(obj["operator"].(string)), + Value: dcl.String(obj["value"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(objs []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(&item) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "effect": obj.Effect, + "key": obj.Key, + "operator": obj.Operator, + "value": obj.Value, + } + + return transformed + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return 
&FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{ + Backends: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj["backends"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "backends": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj.Backends), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{ + Bundles: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(obj["bundles"]), + TemplateLibrary: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj["template_library"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bundles": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(obj.Bundles), + "template_library": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj.TemplateLibrary), + } + + return []interface{}{transformed} + +} + +func 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(o interface{}) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if o == nil { + return make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + } + + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(item) + if item != nil { + items[item.(map[string]interface{})["bundle_name"].(string)] = *i + } + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if o == nil { + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + } + + obj := o.(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{ + ExemptedNamespaces: dcl.ExpandStringArray(obj["exempted_namespaces"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(objs map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(&item, name) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(obj 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, name string) interface{} { + if obj == nil { + return nil + } + transformed := map[string]interface{}{ + "exempted_namespaces": obj.ExemptedNamespaces, + } + + transformed["bundle_name"] = name + + return transformed + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{ + Installation: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(obj["installation"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "installation": obj.Installation, + } + + return []interface{}{transformed} + +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(o interface{}) []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + objs := o.([]interface{}) + items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(objs)) + for _, item := 
range objs { + i := FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(o interface{}) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { + objs := o.([]interface{}) + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum, 0, len(objs)) + for _, item := range objs { + i := FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl index 4cb604caa2c6..834d4d26de9d 100644 --- a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_gke_hub_feature_membership' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkehub.googleapis.com' {{- if ne $.TargetVersionName "ga" }} api_version: 'v1beta' diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl index 9825cee156a2..3bbc24a27af3 100644 --- 
a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl @@ -6,13 +6,13 @@ import ( "strings" "testing" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub{{ $.DCLVersion }}" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/gkehub" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -1249,7 +1249,7 @@ func testAccCheckGkeHubFeatureMembershipPresent(t *testing.T, project, location, Project: dcl.String(project), } - _, err := transport_tpg.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) + _, err := gkehub.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) if err != nil { return err } @@ -1267,7 +1267,7 @@ func testAccCheckGkeHubFeatureMembershipNotPresent(t *testing.T, project, locati Project: dcl.String(project), } - _, err := transport_tpg.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) + _, err := gkehub.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) if err == nil { return fmt.Errorf("Did not expect to find GKE Feature Membership for projects/%s/locations/%s/features/%s/membershipId/%s", project, location, feature, membership) } diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/client.go 
b/mmv1/third_party/terraform/services/recaptchaenterprise/client.go new file mode 100644 index 000000000000..e28180ff2595 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/client.go @@ -0,0 +1,18 @@ +package recaptchaenterprise + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl new file mode 100644 index 000000000000..bb30fd396385 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl @@ -0,0 +1,759 @@ +package recaptchaenterprise + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Key struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + WebSettings *KeyWebSettings `json:"webSettings"` + AndroidSettings *KeyAndroidSettings `json:"androidSettings"` + IosSettings *KeyIosSettings `json:"iosSettings"` + Labels map[string]string `json:"labels"` + CreateTime *string `json:"createTime"` + TestingOptions *KeyTestingOptions `json:"testingOptions"` + WafSettings *KeyWafSettings `json:"wafSettings"` + Project *string `json:"project"` +} + +func (r *Key) String() string { + return dcl.SprintResource(r) +} + +// The enum KeyWebSettingsIntegrationTypeEnum. 
+type KeyWebSettingsIntegrationTypeEnum string + +// KeyWebSettingsIntegrationTypeEnumRef returns a *KeyWebSettingsIntegrationTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWebSettingsIntegrationTypeEnumRef(s string) *KeyWebSettingsIntegrationTypeEnum { + v := KeyWebSettingsIntegrationTypeEnum(s) + return &v +} + +func (v KeyWebSettingsIntegrationTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"SCORE", "CHECKBOX", "INVISIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWebSettingsIntegrationTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyWebSettingsChallengeSecurityPreferenceEnum. +type KeyWebSettingsChallengeSecurityPreferenceEnum string + +// KeyWebSettingsChallengeSecurityPreferenceEnumRef returns a *KeyWebSettingsChallengeSecurityPreferenceEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWebSettingsChallengeSecurityPreferenceEnumRef(s string) *KeyWebSettingsChallengeSecurityPreferenceEnum { + v := KeyWebSettingsChallengeSecurityPreferenceEnum(s) + return &v +} + +func (v KeyWebSettingsChallengeSecurityPreferenceEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED", "USABILITY", "BALANCE", "SECURITY"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWebSettingsChallengeSecurityPreferenceEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyTestingOptionsTestingChallengeEnum. +type KeyTestingOptionsTestingChallengeEnum string + +// KeyTestingOptionsTestingChallengeEnumRef returns a *KeyTestingOptionsTestingChallengeEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func KeyTestingOptionsTestingChallengeEnumRef(s string) *KeyTestingOptionsTestingChallengeEnum { + v := KeyTestingOptionsTestingChallengeEnum(s) + return &v +} + +func (v KeyTestingOptionsTestingChallengeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TESTING_CHALLENGE_UNSPECIFIED", "NOCAPTCHA", "UNSOLVABLE_CHALLENGE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyTestingOptionsTestingChallengeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyWafSettingsWafServiceEnum. +type KeyWafSettingsWafServiceEnum string + +// KeyWafSettingsWafServiceEnumRef returns a *KeyWafSettingsWafServiceEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWafSettingsWafServiceEnumRef(s string) *KeyWafSettingsWafServiceEnum { + v := KeyWafSettingsWafServiceEnum(s) + return &v +} + +func (v KeyWafSettingsWafServiceEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"CA", "FASTLY"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWafSettingsWafServiceEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyWafSettingsWafFeatureEnum. +type KeyWafSettingsWafFeatureEnum string + +// KeyWafSettingsWafFeatureEnumRef returns a *KeyWafSettingsWafFeatureEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWafSettingsWafFeatureEnumRef(s string) *KeyWafSettingsWafFeatureEnum { + v := KeyWafSettingsWafFeatureEnum(s) + return &v +} + +func (v KeyWafSettingsWafFeatureEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"CHALLENGE_PAGE", "SESSION_TOKEN", "ACTION_TOKEN", "EXPRESS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWafSettingsWafFeatureEnum", + Value: string(v), + Valid: []string{}, + } +} + +type KeyWebSettings struct { + empty bool `json:"-"` + AllowAllDomains *bool `json:"allowAllDomains"` + AllowedDomains []string `json:"allowedDomains"` + AllowAmpTraffic *bool `json:"allowAmpTraffic"` + IntegrationType *KeyWebSettingsIntegrationTypeEnum `json:"integrationType"` + ChallengeSecurityPreference *KeyWebSettingsChallengeSecurityPreferenceEnum `json:"challengeSecurityPreference"` +} + +type jsonKeyWebSettings KeyWebSettings + +func (r *KeyWebSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyWebSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyWebSettings + } else { + + r.AllowAllDomains = res.AllowAllDomains + + r.AllowedDomains = res.AllowedDomains + + r.AllowAmpTraffic = res.AllowAmpTraffic + + r.IntegrationType = res.IntegrationType + + r.ChallengeSecurityPreference = res.ChallengeSecurityPreference + + } + return nil +} + +// This object is used to assert a desired state where this KeyWebSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyWebSettings *KeyWebSettings = &KeyWebSettings{empty: true} + +func (r *KeyWebSettings) Empty() bool { + return r.empty +} + +func (r *KeyWebSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyWebSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyAndroidSettings struct { + empty bool `json:"-"` + AllowAllPackageNames *bool `json:"allowAllPackageNames"` + AllowedPackageNames []string `json:"allowedPackageNames"` +} + +type jsonKeyAndroidSettings KeyAndroidSettings + +func (r *KeyAndroidSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyAndroidSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyAndroidSettings + } else { + + r.AllowAllPackageNames = res.AllowAllPackageNames + + r.AllowedPackageNames = res.AllowedPackageNames + + } + return nil +} + +// This object is used to assert a desired state where this KeyAndroidSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyAndroidSettings *KeyAndroidSettings = &KeyAndroidSettings{empty: true} + +func (r *KeyAndroidSettings) Empty() bool { + return r.empty +} + +func (r *KeyAndroidSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyAndroidSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyIosSettings struct { + empty bool `json:"-"` + AllowAllBundleIds *bool `json:"allowAllBundleIds"` + AllowedBundleIds []string `json:"allowedBundleIds"` +} + +type jsonKeyIosSettings KeyIosSettings + +func (r *KeyIosSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyIosSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyIosSettings + } else { + + r.AllowAllBundleIds = res.AllowAllBundleIds + + r.AllowedBundleIds = res.AllowedBundleIds + + } + return nil +} + +// This object is used to assert a desired state where this KeyIosSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyIosSettings *KeyIosSettings = &KeyIosSettings{empty: true} + +func (r *KeyIosSettings) Empty() bool { + return r.empty +} + +func (r *KeyIosSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyIosSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyTestingOptions struct { + empty bool `json:"-"` + TestingScore *float64 `json:"testingScore"` + TestingChallenge *KeyTestingOptionsTestingChallengeEnum `json:"testingChallenge"` +} + +type jsonKeyTestingOptions KeyTestingOptions + +func (r *KeyTestingOptions) UnmarshalJSON(data []byte) error { + var res jsonKeyTestingOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyTestingOptions + } else { + + r.TestingScore = res.TestingScore + + r.TestingChallenge = res.TestingChallenge + + } + return nil +} + +// This object is used to assert a desired state where this KeyTestingOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyTestingOptions *KeyTestingOptions = &KeyTestingOptions{empty: true} + +func (r *KeyTestingOptions) Empty() bool { + return r.empty +} + +func (r *KeyTestingOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyTestingOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyWafSettings struct { + empty bool `json:"-"` + WafService *KeyWafSettingsWafServiceEnum `json:"wafService"` + WafFeature *KeyWafSettingsWafFeatureEnum `json:"wafFeature"` +} + +type jsonKeyWafSettings KeyWafSettings + +func (r *KeyWafSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyWafSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyWafSettings + } else { + + r.WafService = res.WafService + + r.WafFeature = res.WafFeature + + } + return nil +} + +// This object is used to assert a desired state where this KeyWafSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyKeyWafSettings *KeyWafSettings = &KeyWafSettings{empty: true} + +func (r *KeyWafSettings) Empty() bool { + return r.empty +} + +func (r *KeyWafSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyWafSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *Key) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "recaptcha_enterprise", + Type: "Key", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "recaptchaenterprise", +{{- end }} + } +} + +func (r *Key) ID() (string, error) { + if err := extractKeyFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "web_settings": dcl.ValueOrEmptyString(nr.WebSettings), + "android_settings": dcl.ValueOrEmptyString(nr.AndroidSettings), + "ios_settings": dcl.ValueOrEmptyString(nr.IosSettings), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "testing_options": dcl.ValueOrEmptyString(nr.TestingOptions), + "waf_settings": dcl.ValueOrEmptyString(nr.WafSettings), + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const KeyMaxPage = -1 + +type KeyList struct { + Items []*Key + + nextToken string + + pageSize int32 + + resource *Key +} + +func (l *KeyList) HasNext() bool { + return l.nextToken != "" +} + +func (l *KeyList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listKey(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListKey(ctx context.Context, project string) (*KeyList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListKeyWithMaxResults(ctx, project, KeyMaxPage) + +} + +func (c *Client) 
ListKeyWithMaxResults(ctx context.Context, project string, pageSize int32) (*KeyList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Key{ + Project: &project, + } + items, token, err := c.listKey(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &KeyList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetKey(ctx context.Context, r *Key) (*Key, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractKeyFields(r) + + b, err := c.getKeyRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalKey(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeKeyNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractKeyFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteKey(ctx context.Context, r *Key) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Key resource is 
nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Key...") + deleteOp := deleteKeyOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllKey deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllKey(ctx context.Context, project string, filter func(*Key) bool) error { + listObj, err := c.ListKey(ctx, project) + if err != nil { + return err + } + + err = c.deleteAllKey(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllKey(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyKey(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Key + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyKeyHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyKeyHelper(c *Client, ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyKey...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractKeyFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.keyDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToKeyDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []keyApiOperation + if create { + ops = append(ops, &createKeyOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyKeyDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyKeyDiff(c *Client, ctx context.Context, desired *Key, rawDesired *Key, ops []keyApiOperation, opts ...dcl.ApplyOption) (*Key, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetKey(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createKeyOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapKey(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeKeyNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeKeyNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeKeyDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractKeyFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractKeyFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffKey(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go b/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go new file mode 100644 index 000000000000..b4c24f16b6e1 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go @@ -0,0 +1,2750 @@ +package recaptchaenterprise + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *Key) validate() error { + + if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"WebSettings", "AndroidSettings", "IosSettings"}, r.WebSettings, r.AndroidSettings, r.IosSettings); err != nil { + return err + } + if err := dcl.Required(r, "displayName"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.WebSettings) { + if err := r.WebSettings.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AndroidSettings) { + if err := r.AndroidSettings.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.IosSettings) { + if err := r.IosSettings.validate(); err != nil 
{ + return err + } + } + if !dcl.IsEmptyValueIndirect(r.TestingOptions) { + if err := r.TestingOptions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WafSettings) { + if err := r.WafSettings.validate(); err != nil { + return err + } + } + return nil +} +func (r *KeyWebSettings) validate() error { + if err := dcl.Required(r, "integrationType"); err != nil { + return err + } + return nil +} +func (r *KeyAndroidSettings) validate() error { + return nil +} +func (r *KeyIosSettings) validate() error { + return nil +} +func (r *KeyTestingOptions) validate() error { + return nil +} +func (r *KeyWafSettings) validate() error { + if err := dcl.Required(r, "wafService"); err != nil { + return err + } + if err := dcl.Required(r, "wafFeature"); err != nil { + return err + } + return nil +} +func (r *Key) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://recaptchaenterprise.googleapis.com/v1/", params) +} + +func (r *Key) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/keys/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Key) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{project}}/keys", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{project}}/keys", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": 
dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/keys/{{name}}", nr.basePath(), userBasePath, params), nil +} + +// keyApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type keyApiOperation interface { + do(context.Context, *Key, *Client) error +} + +// newUpdateKeyUpdateKeyRequest creates a request for an +// Key resource's UpdateKey update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateKeyUpdateKeyRequest(ctx context.Context, f *Key, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v, err := expandKeyWebSettings(c, f.WebSettings, res); err != nil { + return nil, fmt.Errorf("error expanding WebSettings into webSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["webSettings"] = v + } + if v, err := expandKeyAndroidSettings(c, f.AndroidSettings, res); err != nil { + return nil, fmt.Errorf("error expanding AndroidSettings into androidSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["androidSettings"] = v + } + if v, err := expandKeyIosSettings(c, f.IosSettings, res); err != nil { + return nil, fmt.Errorf("error expanding IosSettings into iosSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["iosSettings"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + return req, nil +} + +// marshalUpdateKeyUpdateKeyRequest converts the update into +// the final JSON request body. +func marshalUpdateKeyUpdateKeyRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateKeyUpdateKeyOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. 
+ // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateKeyUpdateKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + _, err := c.GetKey(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateKey") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateKeyUpdateKeyRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateKeyUpdateKeyRequest(c, req) + if err != nil { + return err + } + _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + return nil +} + +func (c *Client) listKeyRaw(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != KeyMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listKeyOperation struct { + Keys []map[string]interface{} `json:"keys"` + Token string 
`json:"nextPageToken"` +} + +func (c *Client) listKey(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]*Key, string, error) { + b, err := c.listKeyRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listKeyOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Key + for _, v := range m.Keys { + res, err := unmarshalMapKey(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllKey(ctx context.Context, f func(*Key) bool, resources []*Key) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteKey(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteKeyOperation struct{} + +func (op *deleteKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + r, err := c.GetKey(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Key not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetKey checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Key: %w", err) + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createKeyOperation struct { + response map[string]interface{} +} + +func (op *createKeyOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + if r.Name != nil { + // Allowing creation to continue with Name set could result in a Key with the wrong Name. + return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + // Include Name in URL substitution for initial GET request. 
+ m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + + if _, err := c.GetKey(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getKeyRaw(ctx context.Context, r *Key) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) keyDiffsForRawDesired(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (initial, desired *Key, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Key + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Key); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Key, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + if fetchState.Name == nil { + // We cannot perform a get because of lack of information. We have to assume + // that this is being created for the first time. 
+ desired, err := canonicalizeKeyDesiredState(rawDesired, nil) + return nil, desired, nil, err + } + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetKey(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Key resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Key resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Key resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Key: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Key: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractKeyFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeKeyInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Key: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Key: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffKey(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeKeyInitialState(rawInitial, rawDesired *Key) (*Key, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. 
+ + if !dcl.IsZeroValue(rawInitial.WebSettings) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.AndroidSettings, rawInitial.IosSettings) { + rawInitial.WebSettings = EmptyKeyWebSettings + } + } + + if !dcl.IsZeroValue(rawInitial.AndroidSettings) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.WebSettings, rawInitial.IosSettings) { + rawInitial.AndroidSettings = EmptyKeyAndroidSettings + } + } + + if !dcl.IsZeroValue(rawInitial.IosSettings) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.WebSettings, rawInitial.AndroidSettings) { + rawInitial.IosSettings = EmptyKeyIosSettings + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeKeyDesiredState(rawDesired, rawInitial *Key, opts ...dcl.ApplyOption) (*Key, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.WebSettings = canonicalizeKeyWebSettings(rawDesired.WebSettings, nil, opts...) + rawDesired.AndroidSettings = canonicalizeKeyAndroidSettings(rawDesired.AndroidSettings, nil, opts...) + rawDesired.IosSettings = canonicalizeKeyIosSettings(rawDesired.IosSettings, nil, opts...) + rawDesired.TestingOptions = canonicalizeKeyTestingOptions(rawDesired.TestingOptions, nil, opts...) + rawDesired.WafSettings = canonicalizeKeyWafSettings(rawDesired.WafSettings, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Key{} + if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + canonicalDesired.WebSettings = canonicalizeKeyWebSettings(rawDesired.WebSettings, rawInitial.WebSettings, opts...) + canonicalDesired.AndroidSettings = canonicalizeKeyAndroidSettings(rawDesired.AndroidSettings, rawInitial.AndroidSettings, opts...) + canonicalDesired.IosSettings = canonicalizeKeyIosSettings(rawDesired.IosSettings, rawInitial.IosSettings, opts...) + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + canonicalDesired.TestingOptions = canonicalizeKeyTestingOptions(rawDesired.TestingOptions, rawInitial.TestingOptions, opts...) + canonicalDesired.WafSettings = canonicalizeKeyWafSettings(rawDesired.WafSettings, rawInitial.WafSettings, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + + if canonicalDesired.WebSettings != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.AndroidSettings, rawDesired.IosSettings) { + canonicalDesired.WebSettings = EmptyKeyWebSettings + } + } + + if canonicalDesired.AndroidSettings != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.WebSettings, rawDesired.IosSettings) { + canonicalDesired.AndroidSettings = EmptyKeyAndroidSettings + } + } + + if canonicalDesired.IosSettings != nil { + // Check if anything else is set. 
+ if dcl.AnySet(rawDesired.WebSettings, rawDesired.AndroidSettings) { + canonicalDesired.IosSettings = EmptyKeyIosSettings + } + } + + return canonicalDesired, nil +} + +func canonicalizeKeyNewState(c *Client, rawNew, rawDesired *Key) (*Key, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.WebSettings) && dcl.IsEmptyValueIndirect(rawDesired.WebSettings) { + rawNew.WebSettings = rawDesired.WebSettings + } else { + rawNew.WebSettings = canonicalizeNewKeyWebSettings(c, rawDesired.WebSettings, rawNew.WebSettings) + } + + if dcl.IsEmptyValueIndirect(rawNew.AndroidSettings) && dcl.IsEmptyValueIndirect(rawDesired.AndroidSettings) { + rawNew.AndroidSettings = rawDesired.AndroidSettings + } else { + rawNew.AndroidSettings = canonicalizeNewKeyAndroidSettings(c, rawDesired.AndroidSettings, rawNew.AndroidSettings) + } + + if dcl.IsEmptyValueIndirect(rawNew.IosSettings) && dcl.IsEmptyValueIndirect(rawDesired.IosSettings) { + rawNew.IosSettings = rawDesired.IosSettings + } else { + rawNew.IosSettings = canonicalizeNewKeyIosSettings(c, rawDesired.IosSettings, rawNew.IosSettings) + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.TestingOptions) && dcl.IsEmptyValueIndirect(rawDesired.TestingOptions) { + rawNew.TestingOptions = rawDesired.TestingOptions + } else { 
+ rawNew.TestingOptions = canonicalizeNewKeyTestingOptions(c, rawDesired.TestingOptions, rawNew.TestingOptions) + } + + if dcl.IsEmptyValueIndirect(rawNew.WafSettings) && dcl.IsEmptyValueIndirect(rawDesired.WafSettings) { + rawNew.WafSettings = rawDesired.WafSettings + } else { + rawNew.WafSettings = canonicalizeNewKeyWafSettings(c, rawDesired.WafSettings, rawNew.WafSettings) + } + + rawNew.Project = rawDesired.Project + + return rawNew, nil +} + +func canonicalizeKeyWebSettings(des, initial *KeyWebSettings, opts ...dcl.ApplyOption) *KeyWebSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyWebSettings{} + + if dcl.BoolCanonicalize(des.AllowAllDomains, initial.AllowAllDomains) || dcl.IsZeroValue(des.AllowAllDomains) { + cDes.AllowAllDomains = initial.AllowAllDomains + } else { + cDes.AllowAllDomains = des.AllowAllDomains + } + if dcl.StringArrayCanonicalize(des.AllowedDomains, initial.AllowedDomains) { + cDes.AllowedDomains = initial.AllowedDomains + } else { + cDes.AllowedDomains = des.AllowedDomains + } + if dcl.BoolCanonicalize(des.AllowAmpTraffic, initial.AllowAmpTraffic) || dcl.IsZeroValue(des.AllowAmpTraffic) { + cDes.AllowAmpTraffic = initial.AllowAmpTraffic + } else { + cDes.AllowAmpTraffic = des.AllowAmpTraffic + } + if dcl.IsZeroValue(des.IntegrationType) || (dcl.IsEmptyValueIndirect(des.IntegrationType) && dcl.IsEmptyValueIndirect(initial.IntegrationType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.IntegrationType = initial.IntegrationType + } else { + cDes.IntegrationType = des.IntegrationType + } + if dcl.IsZeroValue(des.ChallengeSecurityPreference) || (dcl.IsEmptyValueIndirect(des.ChallengeSecurityPreference) && dcl.IsEmptyValueIndirect(initial.ChallengeSecurityPreference)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.ChallengeSecurityPreference = initial.ChallengeSecurityPreference + } else { + cDes.ChallengeSecurityPreference = des.ChallengeSecurityPreference + } + + return cDes +} + +func canonicalizeKeyWebSettingsSlice(des, initial []KeyWebSettings, opts ...dcl.ApplyOption) []KeyWebSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyWebSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyWebSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyWebSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyWebSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyWebSettings(c *Client, des, nw *KeyWebSettings) *KeyWebSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyWebSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AllowAllDomains, nw.AllowAllDomains) { + nw.AllowAllDomains = des.AllowAllDomains + } + if dcl.StringArrayCanonicalize(des.AllowedDomains, nw.AllowedDomains) { + nw.AllowedDomains = des.AllowedDomains + } + if dcl.BoolCanonicalize(des.AllowAmpTraffic, nw.AllowAmpTraffic) { + nw.AllowAmpTraffic = des.AllowAmpTraffic + } + + return nw +} + +func canonicalizeNewKeyWebSettingsSet(c *Client, des, nw []KeyWebSettings) []KeyWebSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []KeyWebSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyWebSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyWebSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyWebSettingsSlice(c *Client, des, nw []KeyWebSettings) []KeyWebSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyWebSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyWebSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyAndroidSettings(des, initial *KeyAndroidSettings, opts ...dcl.ApplyOption) *KeyAndroidSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyAndroidSettings{} + + if dcl.BoolCanonicalize(des.AllowAllPackageNames, initial.AllowAllPackageNames) || dcl.IsZeroValue(des.AllowAllPackageNames) { + cDes.AllowAllPackageNames = initial.AllowAllPackageNames + } else { + cDes.AllowAllPackageNames = des.AllowAllPackageNames + } + if dcl.StringArrayCanonicalize(des.AllowedPackageNames, initial.AllowedPackageNames) { + cDes.AllowedPackageNames = initial.AllowedPackageNames + } else { + cDes.AllowedPackageNames = des.AllowedPackageNames + } + + return cDes +} + +func canonicalizeKeyAndroidSettingsSlice(des, initial []KeyAndroidSettings, opts ...dcl.ApplyOption) []KeyAndroidSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyAndroidSettings, 0, len(des)) + 
for _, d := range des { + cd := canonicalizeKeyAndroidSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyAndroidSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyAndroidSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyAndroidSettings(c *Client, des, nw *KeyAndroidSettings) *KeyAndroidSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyAndroidSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AllowAllPackageNames, nw.AllowAllPackageNames) { + nw.AllowAllPackageNames = des.AllowAllPackageNames + } + if dcl.StringArrayCanonicalize(des.AllowedPackageNames, nw.AllowedPackageNames) { + nw.AllowedPackageNames = des.AllowedPackageNames + } + + return nw +} + +func canonicalizeNewKeyAndroidSettingsSet(c *Client, des, nw []KeyAndroidSettings) []KeyAndroidSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyAndroidSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyAndroidSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyAndroidSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyAndroidSettingsSlice(c *Client, des, nw []KeyAndroidSettings) []KeyAndroidSettings { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyAndroidSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyAndroidSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyIosSettings(des, initial *KeyIosSettings, opts ...dcl.ApplyOption) *KeyIosSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyIosSettings{} + + if dcl.BoolCanonicalize(des.AllowAllBundleIds, initial.AllowAllBundleIds) || dcl.IsZeroValue(des.AllowAllBundleIds) { + cDes.AllowAllBundleIds = initial.AllowAllBundleIds + } else { + cDes.AllowAllBundleIds = des.AllowAllBundleIds + } + if dcl.StringArrayCanonicalize(des.AllowedBundleIds, initial.AllowedBundleIds) { + cDes.AllowedBundleIds = initial.AllowedBundleIds + } else { + cDes.AllowedBundleIds = des.AllowedBundleIds + } + + return cDes +} + +func canonicalizeKeyIosSettingsSlice(des, initial []KeyIosSettings, opts ...dcl.ApplyOption) []KeyIosSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyIosSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyIosSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyIosSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyIosSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyIosSettings(c *Client, des, nw *KeyIosSettings) *KeyIosSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyIosSettings while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AllowAllBundleIds, nw.AllowAllBundleIds) { + nw.AllowAllBundleIds = des.AllowAllBundleIds + } + if dcl.StringArrayCanonicalize(des.AllowedBundleIds, nw.AllowedBundleIds) { + nw.AllowedBundleIds = des.AllowedBundleIds + } + + return nw +} + +func canonicalizeNewKeyIosSettingsSet(c *Client, des, nw []KeyIosSettings) []KeyIosSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyIosSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyIosSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyIosSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyIosSettingsSlice(c *Client, des, nw []KeyIosSettings) []KeyIosSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyIosSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyIosSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyTestingOptions(des, initial *KeyTestingOptions, opts ...dcl.ApplyOption) *KeyTestingOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyTestingOptions{} + + if dcl.IsZeroValue(des.TestingScore) || (dcl.IsEmptyValueIndirect(des.TestingScore) && dcl.IsEmptyValueIndirect(initial.TestingScore)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TestingScore = initial.TestingScore + } else { + cDes.TestingScore = des.TestingScore + } + if dcl.IsZeroValue(des.TestingChallenge) || (dcl.IsEmptyValueIndirect(des.TestingChallenge) && dcl.IsEmptyValueIndirect(initial.TestingChallenge)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TestingChallenge = initial.TestingChallenge + } else { + cDes.TestingChallenge = des.TestingChallenge + } + + return cDes +} + +func canonicalizeKeyTestingOptionsSlice(des, initial []KeyTestingOptions, opts ...dcl.ApplyOption) []KeyTestingOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyTestingOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyTestingOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyTestingOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyTestingOptions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyTestingOptions(c *Client, des, nw *KeyTestingOptions) *KeyTestingOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyTestingOptions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewKeyTestingOptionsSet(c *Client, des, nw []KeyTestingOptions) []KeyTestingOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyTestingOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyTestingOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyTestingOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyTestingOptionsSlice(c *Client, des, nw []KeyTestingOptions) []KeyTestingOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyTestingOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyTestingOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyWafSettings(des, initial *KeyWafSettings, opts ...dcl.ApplyOption) *KeyWafSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyWafSettings{} + + if dcl.IsZeroValue(des.WafService) || (dcl.IsEmptyValueIndirect(des.WafService) && dcl.IsEmptyValueIndirect(initial.WafService)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.WafService = initial.WafService + } else { + cDes.WafService = des.WafService + } + if dcl.IsZeroValue(des.WafFeature) || (dcl.IsEmptyValueIndirect(des.WafFeature) && dcl.IsEmptyValueIndirect(initial.WafFeature)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.WafFeature = initial.WafFeature + } else { + cDes.WafFeature = des.WafFeature + } + + return cDes +} + +func canonicalizeKeyWafSettingsSlice(des, initial []KeyWafSettings, opts ...dcl.ApplyOption) []KeyWafSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyWafSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyWafSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyWafSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyWafSettings(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyWafSettings(c *Client, des, nw *KeyWafSettings) *KeyWafSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyWafSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewKeyWafSettingsSet(c *Client, des, nw []KeyWafSettings) []KeyWafSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyWafSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyWafSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyWafSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyWafSettingsSlice(c *Client, des, nw []KeyWafSettings) []KeyWafSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyWafSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyWafSettings(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. 
+// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffKey(c *Client, desired, actual *Key, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.WebSettings, actual.WebSettings, dcl.DiffInfo{ObjectFunction: compareKeyWebSettingsNewStyle, EmptyObject: EmptyKeyWebSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WebSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AndroidSettings, actual.AndroidSettings, dcl.DiffInfo{ObjectFunction: compareKeyAndroidSettingsNewStyle, EmptyObject: EmptyKeyAndroidSettings, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AndroidSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.IosSettings, actual.IosSettings, dcl.DiffInfo{ObjectFunction: compareKeyIosSettingsNewStyle, EmptyObject: EmptyKeyIosSettings, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("IosSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.TestingOptions, actual.TestingOptions, dcl.DiffInfo{ObjectFunction: compareKeyTestingOptionsNewStyle, EmptyObject: EmptyKeyTestingOptions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TestingOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.WafSettings, actual.WafSettings, dcl.DiffInfo{ObjectFunction: compareKeyWafSettingsNewStyle, EmptyObject: EmptyKeyWafSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WafSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareKeyWebSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyWebSettings) + if !ok { + desiredNotPointer, ok := d.(KeyWebSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyWebSettings or *KeyWebSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyWebSettings) + if !ok { + actualNotPointer, ok := a.(KeyWebSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyWebSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AllowAllDomains, actual.AllowAllDomains, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAllDomains")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AllowedDomains, actual.AllowedDomains, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedDomains")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AllowAmpTraffic, actual.AllowAmpTraffic, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAmpTraffic")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IntegrationType, actual.IntegrationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IntegrationType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ChallengeSecurityPreference, actual.ChallengeSecurityPreference, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("ChallengeSecurityPreference")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareKeyAndroidSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyAndroidSettings) + if !ok { + desiredNotPointer, ok := d.(KeyAndroidSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyAndroidSettings or *KeyAndroidSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyAndroidSettings) + if !ok { + actualNotPointer, ok := a.(KeyAndroidSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyAndroidSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AllowAllPackageNames, actual.AllowAllPackageNames, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAllPackageNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AllowedPackageNames, actual.AllowedPackageNames, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedPackageNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareKeyIosSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyIosSettings) + if !ok { + desiredNotPointer, ok := d.(KeyIosSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyIosSettings or *KeyIosSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyIosSettings) + if !ok { + actualNotPointer, ok := a.(KeyIosSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyIosSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AllowAllBundleIds, actual.AllowAllBundleIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAllBundleIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AllowedBundleIds, actual.AllowedBundleIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedBundleIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareKeyTestingOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyTestingOptions) + if !ok { + desiredNotPointer, ok := d.(KeyTestingOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyTestingOptions or *KeyTestingOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyTestingOptions) + if !ok { + actualNotPointer, ok := a.(KeyTestingOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyTestingOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TestingScore, actual.TestingScore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TestingScore")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TestingChallenge, actual.TestingChallenge, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TestingChallenge")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareKeyWafSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*KeyWafSettings) + if !ok { + desiredNotPointer, ok := d.(KeyWafSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyWafSettings or *KeyWafSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*KeyWafSettings) + if !ok { + actualNotPointer, ok := a.(KeyWafSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a KeyWafSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.WafService, actual.WafService, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WafService")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WafFeature, actual.WafFeature, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WafFeature")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Key) urlNormalized() *Key { + normalized := dcl.Copy(*r).(Key) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.Project = dcl.SelfLinkToName(r.Project) + return &normalized +} + +func (r *Key) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateKey" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/keys/{{name}}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Key resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Key) marshal(c *Client) ([]byte, error) { + m, err := expandKey(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Key: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalKey decodes JSON responses into the Key resource schema. +func unmarshalKey(b []byte, c *Client, res *Key) (*Key, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapKey(m, c, res) +} + +func unmarshalMapKey(m map[string]interface{}, c *Client, res *Key) (*Key, error) { + + flattened := flattenKey(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandKey expands Key into a JSON request object. 
+func expandKey(c *Client, f *Key) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/keys/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v, err := expandKeyWebSettings(c, f.WebSettings, res); err != nil { + return nil, fmt.Errorf("error expanding WebSettings into webSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["webSettings"] = v + } + if v, err := expandKeyAndroidSettings(c, f.AndroidSettings, res); err != nil { + return nil, fmt.Errorf("error expanding AndroidSettings into androidSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["androidSettings"] = v + } + if v, err := expandKeyIosSettings(c, f.IosSettings, res); err != nil { + return nil, fmt.Errorf("error expanding IosSettings into iosSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["iosSettings"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v, err := expandKeyTestingOptions(c, f.TestingOptions, res); err != nil { + return nil, fmt.Errorf("error expanding TestingOptions into testingOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["testingOptions"] = v + } + if v, err := expandKeyWafSettings(c, f.WafSettings, res); err != nil { + return nil, fmt.Errorf("error expanding WafSettings into wafSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["wafSettings"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenKey flattens Key from a JSON request object into the +// Key type. 
+func flattenKey(c *Client, i interface{}, res *Key) *Key { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Key{} + resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.WebSettings = flattenKeyWebSettings(c, m["webSettings"], res) + resultRes.AndroidSettings = flattenKeyAndroidSettings(c, m["androidSettings"], res) + resultRes.IosSettings = flattenKeyIosSettings(c, m["iosSettings"], res) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.TestingOptions = flattenKeyTestingOptions(c, m["testingOptions"], res) + resultRes.WafSettings = flattenKeyWafSettings(c, m["wafSettings"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + + return resultRes +} + +// expandKeyWebSettingsMap expands the contents of KeyWebSettings into a JSON +// request object. +func expandKeyWebSettingsMap(c *Client, f map[string]KeyWebSettings, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyWebSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyWebSettingsSlice expands the contents of KeyWebSettings into a JSON +// request object. +func expandKeyWebSettingsSlice(c *Client, f []KeyWebSettings, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyWebSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyWebSettingsMap flattens the contents of KeyWebSettings from a JSON +// response object. 
+func flattenKeyWebSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyWebSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWebSettings{} + } + + if len(a) == 0 { + return map[string]KeyWebSettings{} + } + + items := make(map[string]KeyWebSettings) + for k, item := range a { + items[k] = *flattenKeyWebSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyWebSettingsSlice flattens the contents of KeyWebSettings from a JSON +// response object. +func flattenKeyWebSettingsSlice(c *Client, i interface{}, res *Key) []KeyWebSettings { + a, ok := i.([]interface{}) + if !ok { + return []KeyWebSettings{} + } + + if len(a) == 0 { + return []KeyWebSettings{} + } + + items := make([]KeyWebSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWebSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyWebSettings expands an instance of KeyWebSettings into a JSON +// request object. +func expandKeyWebSettings(c *Client, f *KeyWebSettings, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowAllDomains; !dcl.IsEmptyValueIndirect(v) { + m["allowAllDomains"] = v + } + if v := f.AllowedDomains; v != nil { + m["allowedDomains"] = v + } + if v := f.AllowAmpTraffic; !dcl.IsEmptyValueIndirect(v) { + m["allowAmpTraffic"] = v + } + if v := f.IntegrationType; !dcl.IsEmptyValueIndirect(v) { + m["integrationType"] = v + } + if v := f.ChallengeSecurityPreference; !dcl.IsEmptyValueIndirect(v) { + m["challengeSecurityPreference"] = v + } + + return m, nil +} + +// flattenKeyWebSettings flattens an instance of KeyWebSettings from a JSON +// response object. 
+func flattenKeyWebSettings(c *Client, i interface{}, res *Key) *KeyWebSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyWebSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyWebSettings + } + r.AllowAllDomains = dcl.FlattenBool(m["allowAllDomains"]) + r.AllowedDomains = dcl.FlattenStringSlice(m["allowedDomains"]) + r.AllowAmpTraffic = dcl.FlattenBool(m["allowAmpTraffic"]) + r.IntegrationType = flattenKeyWebSettingsIntegrationTypeEnum(m["integrationType"]) + r.ChallengeSecurityPreference = flattenKeyWebSettingsChallengeSecurityPreferenceEnum(m["challengeSecurityPreference"]) + + return r +} + +// expandKeyAndroidSettingsMap expands the contents of KeyAndroidSettings into a JSON +// request object. +func expandKeyAndroidSettingsMap(c *Client, f map[string]KeyAndroidSettings, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyAndroidSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyAndroidSettingsSlice expands the contents of KeyAndroidSettings into a JSON +// request object. +func expandKeyAndroidSettingsSlice(c *Client, f []KeyAndroidSettings, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyAndroidSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyAndroidSettingsMap flattens the contents of KeyAndroidSettings from a JSON +// response object. 
+func flattenKeyAndroidSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyAndroidSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyAndroidSettings{} + } + + if len(a) == 0 { + return map[string]KeyAndroidSettings{} + } + + items := make(map[string]KeyAndroidSettings) + for k, item := range a { + items[k] = *flattenKeyAndroidSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyAndroidSettingsSlice flattens the contents of KeyAndroidSettings from a JSON +// response object. +func flattenKeyAndroidSettingsSlice(c *Client, i interface{}, res *Key) []KeyAndroidSettings { + a, ok := i.([]interface{}) + if !ok { + return []KeyAndroidSettings{} + } + + if len(a) == 0 { + return []KeyAndroidSettings{} + } + + items := make([]KeyAndroidSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyAndroidSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyAndroidSettings expands an instance of KeyAndroidSettings into a JSON +// request object. +func expandKeyAndroidSettings(c *Client, f *KeyAndroidSettings, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowAllPackageNames; !dcl.IsEmptyValueIndirect(v) { + m["allowAllPackageNames"] = v + } + if v := f.AllowedPackageNames; v != nil { + m["allowedPackageNames"] = v + } + + return m, nil +} + +// flattenKeyAndroidSettings flattens an instance of KeyAndroidSettings from a JSON +// response object. 
+func flattenKeyAndroidSettings(c *Client, i interface{}, res *Key) *KeyAndroidSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyAndroidSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyAndroidSettings + } + r.AllowAllPackageNames = dcl.FlattenBool(m["allowAllPackageNames"]) + r.AllowedPackageNames = dcl.FlattenStringSlice(m["allowedPackageNames"]) + + return r +} + +// expandKeyIosSettingsMap expands the contents of KeyIosSettings into a JSON +// request object. +func expandKeyIosSettingsMap(c *Client, f map[string]KeyIosSettings, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyIosSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyIosSettingsSlice expands the contents of KeyIosSettings into a JSON +// request object. +func expandKeyIosSettingsSlice(c *Client, f []KeyIosSettings, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyIosSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyIosSettingsMap flattens the contents of KeyIosSettings from a JSON +// response object. +func flattenKeyIosSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyIosSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyIosSettings{} + } + + if len(a) == 0 { + return map[string]KeyIosSettings{} + } + + items := make(map[string]KeyIosSettings) + for k, item := range a { + items[k] = *flattenKeyIosSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyIosSettingsSlice flattens the contents of KeyIosSettings from a JSON +// response object. 
+func flattenKeyIosSettingsSlice(c *Client, i interface{}, res *Key) []KeyIosSettings { + a, ok := i.([]interface{}) + if !ok { + return []KeyIosSettings{} + } + + if len(a) == 0 { + return []KeyIosSettings{} + } + + items := make([]KeyIosSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyIosSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyIosSettings expands an instance of KeyIosSettings into a JSON +// request object. +func expandKeyIosSettings(c *Client, f *KeyIosSettings, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowAllBundleIds; !dcl.IsEmptyValueIndirect(v) { + m["allowAllBundleIds"] = v + } + if v := f.AllowedBundleIds; v != nil { + m["allowedBundleIds"] = v + } + + return m, nil +} + +// flattenKeyIosSettings flattens an instance of KeyIosSettings from a JSON +// response object. +func flattenKeyIosSettings(c *Client, i interface{}, res *Key) *KeyIosSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyIosSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyIosSettings + } + r.AllowAllBundleIds = dcl.FlattenBool(m["allowAllBundleIds"]) + r.AllowedBundleIds = dcl.FlattenStringSlice(m["allowedBundleIds"]) + + return r +} + +// expandKeyTestingOptionsMap expands the contents of KeyTestingOptions into a JSON +// request object. +func expandKeyTestingOptionsMap(c *Client, f map[string]KeyTestingOptions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyTestingOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyTestingOptionsSlice expands the contents of KeyTestingOptions into a JSON +// request object. 
+func expandKeyTestingOptionsSlice(c *Client, f []KeyTestingOptions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyTestingOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyTestingOptionsMap flattens the contents of KeyTestingOptions from a JSON +// response object. +func flattenKeyTestingOptionsMap(c *Client, i interface{}, res *Key) map[string]KeyTestingOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyTestingOptions{} + } + + if len(a) == 0 { + return map[string]KeyTestingOptions{} + } + + items := make(map[string]KeyTestingOptions) + for k, item := range a { + items[k] = *flattenKeyTestingOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyTestingOptionsSlice flattens the contents of KeyTestingOptions from a JSON +// response object. +func flattenKeyTestingOptionsSlice(c *Client, i interface{}, res *Key) []KeyTestingOptions { + a, ok := i.([]interface{}) + if !ok { + return []KeyTestingOptions{} + } + + if len(a) == 0 { + return []KeyTestingOptions{} + } + + items := make([]KeyTestingOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyTestingOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyTestingOptions expands an instance of KeyTestingOptions into a JSON +// request object. 
+func expandKeyTestingOptions(c *Client, f *KeyTestingOptions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TestingScore; !dcl.IsEmptyValueIndirect(v) { + m["testingScore"] = v + } + if v := f.TestingChallenge; !dcl.IsEmptyValueIndirect(v) { + m["testingChallenge"] = v + } + + return m, nil +} + +// flattenKeyTestingOptions flattens an instance of KeyTestingOptions from a JSON +// response object. +func flattenKeyTestingOptions(c *Client, i interface{}, res *Key) *KeyTestingOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyTestingOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyTestingOptions + } + r.TestingScore = dcl.FlattenDouble(m["testingScore"]) + r.TestingChallenge = flattenKeyTestingOptionsTestingChallengeEnum(m["testingChallenge"]) + + return r +} + +// expandKeyWafSettingsMap expands the contents of KeyWafSettings into a JSON +// request object. +func expandKeyWafSettingsMap(c *Client, f map[string]KeyWafSettings, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyWafSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyWafSettingsSlice expands the contents of KeyWafSettings into a JSON +// request object. +func expandKeyWafSettingsSlice(c *Client, f []KeyWafSettings, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyWafSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyWafSettingsMap flattens the contents of KeyWafSettings from a JSON +// response object. 
+func flattenKeyWafSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyWafSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWafSettings{} + } + + if len(a) == 0 { + return map[string]KeyWafSettings{} + } + + items := make(map[string]KeyWafSettings) + for k, item := range a { + items[k] = *flattenKeyWafSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyWafSettingsSlice flattens the contents of KeyWafSettings from a JSON +// response object. +func flattenKeyWafSettingsSlice(c *Client, i interface{}, res *Key) []KeyWafSettings { + a, ok := i.([]interface{}) + if !ok { + return []KeyWafSettings{} + } + + if len(a) == 0 { + return []KeyWafSettings{} + } + + items := make([]KeyWafSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWafSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyWafSettings expands an instance of KeyWafSettings into a JSON +// request object. +func expandKeyWafSettings(c *Client, f *KeyWafSettings, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.WafService; !dcl.IsEmptyValueIndirect(v) { + m["wafService"] = v + } + if v := f.WafFeature; !dcl.IsEmptyValueIndirect(v) { + m["wafFeature"] = v + } + + return m, nil +} + +// flattenKeyWafSettings flattens an instance of KeyWafSettings from a JSON +// response object. 
+func flattenKeyWafSettings(c *Client, i interface{}, res *Key) *KeyWafSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyWafSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyWafSettings + } + r.WafService = flattenKeyWafSettingsWafServiceEnum(m["wafService"]) + r.WafFeature = flattenKeyWafSettingsWafFeatureEnum(m["wafFeature"]) + + return r +} + +// flattenKeyWebSettingsIntegrationTypeEnumMap flattens the contents of KeyWebSettingsIntegrationTypeEnum from a JSON +// response object. +func flattenKeyWebSettingsIntegrationTypeEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWebSettingsIntegrationTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWebSettingsIntegrationTypeEnum{} + } + + if len(a) == 0 { + return map[string]KeyWebSettingsIntegrationTypeEnum{} + } + + items := make(map[string]KeyWebSettingsIntegrationTypeEnum) + for k, item := range a { + items[k] = *flattenKeyWebSettingsIntegrationTypeEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWebSettingsIntegrationTypeEnumSlice flattens the contents of KeyWebSettingsIntegrationTypeEnum from a JSON +// response object. +func flattenKeyWebSettingsIntegrationTypeEnumSlice(c *Client, i interface{}, res *Key) []KeyWebSettingsIntegrationTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWebSettingsIntegrationTypeEnum{} + } + + if len(a) == 0 { + return []KeyWebSettingsIntegrationTypeEnum{} + } + + items := make([]KeyWebSettingsIntegrationTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWebSettingsIntegrationTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWebSettingsIntegrationTypeEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWebSettingsIntegrationTypeEnum with the same value as that string. 
+func flattenKeyWebSettingsIntegrationTypeEnum(i interface{}) *KeyWebSettingsIntegrationTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWebSettingsIntegrationTypeEnumRef(s) +} + +// flattenKeyWebSettingsChallengeSecurityPreferenceEnumMap flattens the contents of KeyWebSettingsChallengeSecurityPreferenceEnum from a JSON +// response object. +func flattenKeyWebSettingsChallengeSecurityPreferenceEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWebSettingsChallengeSecurityPreferenceEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + if len(a) == 0 { + return map[string]KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + items := make(map[string]KeyWebSettingsChallengeSecurityPreferenceEnum) + for k, item := range a { + items[k] = *flattenKeyWebSettingsChallengeSecurityPreferenceEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWebSettingsChallengeSecurityPreferenceEnumSlice flattens the contents of KeyWebSettingsChallengeSecurityPreferenceEnum from a JSON +// response object. +func flattenKeyWebSettingsChallengeSecurityPreferenceEnumSlice(c *Client, i interface{}, res *Key) []KeyWebSettingsChallengeSecurityPreferenceEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + if len(a) == 0 { + return []KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + items := make([]KeyWebSettingsChallengeSecurityPreferenceEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWebSettingsChallengeSecurityPreferenceEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWebSettingsChallengeSecurityPreferenceEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWebSettingsChallengeSecurityPreferenceEnum with the same value as that string. 
+func flattenKeyWebSettingsChallengeSecurityPreferenceEnum(i interface{}) *KeyWebSettingsChallengeSecurityPreferenceEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWebSettingsChallengeSecurityPreferenceEnumRef(s) +} + +// flattenKeyTestingOptionsTestingChallengeEnumMap flattens the contents of KeyTestingOptionsTestingChallengeEnum from a JSON +// response object. +func flattenKeyTestingOptionsTestingChallengeEnumMap(c *Client, i interface{}, res *Key) map[string]KeyTestingOptionsTestingChallengeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyTestingOptionsTestingChallengeEnum{} + } + + if len(a) == 0 { + return map[string]KeyTestingOptionsTestingChallengeEnum{} + } + + items := make(map[string]KeyTestingOptionsTestingChallengeEnum) + for k, item := range a { + items[k] = *flattenKeyTestingOptionsTestingChallengeEnum(item.(interface{})) + } + + return items +} + +// flattenKeyTestingOptionsTestingChallengeEnumSlice flattens the contents of KeyTestingOptionsTestingChallengeEnum from a JSON +// response object. +func flattenKeyTestingOptionsTestingChallengeEnumSlice(c *Client, i interface{}, res *Key) []KeyTestingOptionsTestingChallengeEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyTestingOptionsTestingChallengeEnum{} + } + + if len(a) == 0 { + return []KeyTestingOptionsTestingChallengeEnum{} + } + + items := make([]KeyTestingOptionsTestingChallengeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyTestingOptionsTestingChallengeEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyTestingOptionsTestingChallengeEnum asserts that an interface is a string, and returns a +// pointer to a *KeyTestingOptionsTestingChallengeEnum with the same value as that string. 
+func flattenKeyTestingOptionsTestingChallengeEnum(i interface{}) *KeyTestingOptionsTestingChallengeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyTestingOptionsTestingChallengeEnumRef(s) +} + +// flattenKeyWafSettingsWafServiceEnumMap flattens the contents of KeyWafSettingsWafServiceEnum from a JSON +// response object. +func flattenKeyWafSettingsWafServiceEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWafSettingsWafServiceEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWafSettingsWafServiceEnum{} + } + + if len(a) == 0 { + return map[string]KeyWafSettingsWafServiceEnum{} + } + + items := make(map[string]KeyWafSettingsWafServiceEnum) + for k, item := range a { + items[k] = *flattenKeyWafSettingsWafServiceEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWafSettingsWafServiceEnumSlice flattens the contents of KeyWafSettingsWafServiceEnum from a JSON +// response object. +func flattenKeyWafSettingsWafServiceEnumSlice(c *Client, i interface{}, res *Key) []KeyWafSettingsWafServiceEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWafSettingsWafServiceEnum{} + } + + if len(a) == 0 { + return []KeyWafSettingsWafServiceEnum{} + } + + items := make([]KeyWafSettingsWafServiceEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWafSettingsWafServiceEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWafSettingsWafServiceEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWafSettingsWafServiceEnum with the same value as that string. +func flattenKeyWafSettingsWafServiceEnum(i interface{}) *KeyWafSettingsWafServiceEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWafSettingsWafServiceEnumRef(s) +} + +// flattenKeyWafSettingsWafFeatureEnumMap flattens the contents of KeyWafSettingsWafFeatureEnum from a JSON +// response object. 
+func flattenKeyWafSettingsWafFeatureEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWafSettingsWafFeatureEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWafSettingsWafFeatureEnum{} + } + + if len(a) == 0 { + return map[string]KeyWafSettingsWafFeatureEnum{} + } + + items := make(map[string]KeyWafSettingsWafFeatureEnum) + for k, item := range a { + items[k] = *flattenKeyWafSettingsWafFeatureEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWafSettingsWafFeatureEnumSlice flattens the contents of KeyWafSettingsWafFeatureEnum from a JSON +// response object. +func flattenKeyWafSettingsWafFeatureEnumSlice(c *Client, i interface{}, res *Key) []KeyWafSettingsWafFeatureEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWafSettingsWafFeatureEnum{} + } + + if len(a) == 0 { + return []KeyWafSettingsWafFeatureEnum{} + } + + items := make([]KeyWafSettingsWafFeatureEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWafSettingsWafFeatureEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWafSettingsWafFeatureEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWafSettingsWafFeatureEnum with the same value as that string. +func flattenKeyWafSettingsWafFeatureEnum(i interface{}) *KeyWafSettingsWafFeatureEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWafSettingsWafFeatureEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Key) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalKey(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type keyDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp keyApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToKeyDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]keyDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []keyDiff + // For each operation name, create a keyDiff which contains the operation. 
+ for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := keyDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToKeyApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToKeyApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (keyApiOperation, error) { + switch opName { + + case "updateKeyUpdateKeyOperation": + return &updateKeyUpdateKeyOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractKeyFields(r *Key) error { + vWebSettings := r.WebSettings + if vWebSettings == nil { + // note: explicitly not the empty object. + vWebSettings = &KeyWebSettings{} + } + if err := extractKeyWebSettingsFields(r, vWebSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWebSettings) { + r.WebSettings = vWebSettings + } + vAndroidSettings := r.AndroidSettings + if vAndroidSettings == nil { + // note: explicitly not the empty object. + vAndroidSettings = &KeyAndroidSettings{} + } + if err := extractKeyAndroidSettingsFields(r, vAndroidSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAndroidSettings) { + r.AndroidSettings = vAndroidSettings + } + vIosSettings := r.IosSettings + if vIosSettings == nil { + // note: explicitly not the empty object. + vIosSettings = &KeyIosSettings{} + } + if err := extractKeyIosSettingsFields(r, vIosSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIosSettings) { + r.IosSettings = vIosSettings + } + vTestingOptions := r.TestingOptions + if vTestingOptions == nil { + // note: explicitly not the empty object. 
+ vTestingOptions = &KeyTestingOptions{} + } + if err := extractKeyTestingOptionsFields(r, vTestingOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTestingOptions) { + r.TestingOptions = vTestingOptions + } + vWafSettings := r.WafSettings + if vWafSettings == nil { + // note: explicitly not the empty object. + vWafSettings = &KeyWafSettings{} + } + if err := extractKeyWafSettingsFields(r, vWafSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWafSettings) { + r.WafSettings = vWafSettings + } + return nil +} +func extractKeyWebSettingsFields(r *Key, o *KeyWebSettings) error { + return nil +} +func extractKeyAndroidSettingsFields(r *Key, o *KeyAndroidSettings) error { + return nil +} +func extractKeyIosSettingsFields(r *Key, o *KeyIosSettings) error { + return nil +} +func extractKeyTestingOptionsFields(r *Key, o *KeyTestingOptions) error { + return nil +} +func extractKeyWafSettingsFields(r *Key, o *KeyWafSettings) error { + return nil +} + +func postReadExtractKeyFields(r *Key) error { + vWebSettings := r.WebSettings + if vWebSettings == nil { + // note: explicitly not the empty object. + vWebSettings = &KeyWebSettings{} + } + if err := postReadExtractKeyWebSettingsFields(r, vWebSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWebSettings) { + r.WebSettings = vWebSettings + } + vAndroidSettings := r.AndroidSettings + if vAndroidSettings == nil { + // note: explicitly not the empty object. + vAndroidSettings = &KeyAndroidSettings{} + } + if err := postReadExtractKeyAndroidSettingsFields(r, vAndroidSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAndroidSettings) { + r.AndroidSettings = vAndroidSettings + } + vIosSettings := r.IosSettings + if vIosSettings == nil { + // note: explicitly not the empty object. 
+ vIosSettings = &KeyIosSettings{} + } + if err := postReadExtractKeyIosSettingsFields(r, vIosSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIosSettings) { + r.IosSettings = vIosSettings + } + vTestingOptions := r.TestingOptions + if vTestingOptions == nil { + // note: explicitly not the empty object. + vTestingOptions = &KeyTestingOptions{} + } + if err := postReadExtractKeyTestingOptionsFields(r, vTestingOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTestingOptions) { + r.TestingOptions = vTestingOptions + } + vWafSettings := r.WafSettings + if vWafSettings == nil { + // note: explicitly not the empty object. + vWafSettings = &KeyWafSettings{} + } + if err := postReadExtractKeyWafSettingsFields(r, vWafSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWafSettings) { + r.WafSettings = vWafSettings + } + return nil +} +func postReadExtractKeyWebSettingsFields(r *Key, o *KeyWebSettings) error { + return nil +} +func postReadExtractKeyAndroidSettingsFields(r *Key, o *KeyAndroidSettings) error { + return nil +} +func postReadExtractKeyIosSettingsFields(r *Key, o *KeyIosSettings) error { + return nil +} +func postReadExtractKeyTestingOptionsFields(r *Key, o *KeyTestingOptions) error { + return nil +} +func postReadExtractKeyWafSettingsFields(r *Key, o *KeyWafSettings) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/recaptchaenterprise/provider_dcl_client_creation.go new file mode 100644 index 000000000000..5235a3834f54 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package recaptchaenterprise + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func 
NewDCLRecaptchaEnterpriseClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.RecaptchaEnterpriseBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go new file mode 100644 index 000000000000..57a675af7c13 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go @@ -0,0 +1,689 @@ +package recaptchaenterprise + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceRecaptchaEnterpriseKey() *schema.Resource { + return &schema.Resource{ + Create: resourceRecaptchaEnterpriseKeyCreate, + Read: resourceRecaptchaEnterpriseKeyRead, + Update: resourceRecaptchaEnterpriseKeyUpdate, + Delete: resourceRecaptchaEnterpriseKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceRecaptchaEnterpriseKeyImport, + 
}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: "Human-readable display name of this key. Modifiable by user.", + }, + + "android_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by Android apps.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyAndroidSettingsSchema(), + ConflictsWith: []string{"web_settings", "ios_settings"}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "ios_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by iOS apps.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyIosSettingsSchema(), + ConflictsWith: []string{"web_settings", "android_settings"}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "testing_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Options for user acceptance testing.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyTestingOptionsSchema(), + }, + + "waf_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Settings specific to keys that can be used for WAF (Web Application Firewall).", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyWafSettingsSchema(), + }, + + "web_settings": { + Type: schema.TypeList, + 
Optional: true, + Description: "Settings for keys that can be used by websites.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyWebSettingsSchema(), + ConflictsWith: []string{"android_settings", "ios_settings"}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "The timestamp corresponding to the creation of this Key.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "See [Creating and managing labels](https://cloud.google.com/recaptcha-enterprise/docs/labels).\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The resource id for the Key, which is the same as the Site Key itself.", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + }, + } +} + +func RecaptchaEnterpriseKeyAndroidSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_all_package_names": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_package_names will not be enforced.", + }, + + "allowed_package_names": { + Type: schema.TypeList, + Optional: true, + Description: "Android package names of apps allowed to use the key. 
Example: 'com.companyname.appname'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func RecaptchaEnterpriseKeyIosSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_all_bundle_ids": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_bundle_ids will not be enforced.", + }, + + "allowed_bundle_ids": { + Type: schema.TypeList, + Optional: true, + Description: "iOS bundle ids of apps allowed to use the key. Example: 'com.companyname.productname.appname'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func RecaptchaEnterpriseKeyTestingOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "testing_challenge": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE", + }, + + "testing_score": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + Description: "All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive.", + }, + }, + } +} + +func RecaptchaEnterpriseKeyWafSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "waf_feature": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Supported WAF features. For more information, see https://cloud.google.com/recaptcha-enterprise/docs/usecase#comparison_of_features. Possible values: CHALLENGE_PAGE, SESSION_TOKEN, ACTION_TOKEN, EXPRESS", + }, + + "waf_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The WAF service that uses this key. 
Possible values: CA, FASTLY", + }, + }, + } +} + +func RecaptchaEnterpriseKeyWebSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "integration_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Describes how this key is integrated with the website. Possible values: SCORE, CHECKBOX, INVISIBLE", + }, + + "allow_all_domains": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_domains will not be enforced.", + }, + + "allow_amp_traffic": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type.", + }, + + "allowed_domains": { + Type: schema.TypeList, + Optional: true, + Description: "Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "challenge_security_preference": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. 
Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY", + }, + }, + } +} + +func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Key: %s", err) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. + + id, err = res.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Key %q: %#v", d.Id(), res) + + return resourceRecaptchaEnterpriseKeyRead(d, meta) +} + +func resourceRecaptchaEnterpriseKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, 
config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetKey(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("RecaptchaEnterpriseKey %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("android_settings", flattenRecaptchaEnterpriseKeyAndroidSettings(res.AndroidSettings)); err != nil { + return fmt.Errorf("error setting android_settings in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("ios_settings", flattenRecaptchaEnterpriseKeyIosSettings(res.IosSettings)); err != nil { + return fmt.Errorf("error setting ios_settings in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("testing_options", flattenRecaptchaEnterpriseKeyTestingOptions(res.TestingOptions)); err != nil { + return fmt.Errorf("error setting testing_options in state: %s", err) + } + if err = d.Set("waf_settings", flattenRecaptchaEnterpriseKeyWafSettings(res.WafSettings)); err != nil { + return fmt.Errorf("error setting waf_settings in state: %s", err) + } + if err = d.Set("web_settings", flattenRecaptchaEnterpriseKeyWebSettings(res.WebSettings)); err != nil { + return fmt.Errorf("error setting web_settings in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", flattenRecaptchaEnterpriseKeyLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) 
+ } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("terraform_labels", flattenRecaptchaEnterpriseKeyTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + + return nil +} +func resourceRecaptchaEnterpriseKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Key: %s", err) + } + + log.Printf("[DEBUG] Finished creating Key %q: %#v", d.Id(), res) + + return resourceRecaptchaEnterpriseKeyRead(d, meta) +} + +func resourceRecaptchaEnterpriseKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + log.Printf("[DEBUG] Deleting Key %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteKey(context.Background(), obj); 
err != nil {
+		return fmt.Errorf("Error deleting Key: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished deleting Key %q", d.Id())
+	return nil
+}
+
+func resourceRecaptchaEnterpriseKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	if err := tpgresource.ParseImportId([]string{
+		"projects/(?P<project>[^/]+)/keys/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<name>[^/]+)",
+		"(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func expandRecaptchaEnterpriseKeyAndroidSettings(o interface{}) *KeyAndroidSettings {
+	if o == nil {
+		return EmptyKeyAndroidSettings
+	}
+	objArr := o.([]interface{})
+	if len(objArr) == 0 || objArr[0] == nil {
+		return EmptyKeyAndroidSettings
+	}
+	obj := objArr[0].(map[string]interface{})
+	return &KeyAndroidSettings{
+		AllowAllPackageNames: dcl.Bool(obj["allow_all_package_names"].(bool)),
+		AllowedPackageNames:  tpgdclresource.ExpandStringArray(obj["allowed_package_names"]),
+	}
+}
+
+func flattenRecaptchaEnterpriseKeyAndroidSettings(obj *KeyAndroidSettings) interface{} {
+	if obj == nil || obj.Empty() {
+		return nil
+	}
+	transformed := map[string]interface{}{
+		"allow_all_package_names": obj.AllowAllPackageNames,
+		"allowed_package_names":   obj.AllowedPackageNames,
+	}
+
+	return []interface{}{transformed}
+
+}
+
+func expandRecaptchaEnterpriseKeyIosSettings(o interface{}) *KeyIosSettings {
+	if o == nil {
+		return EmptyKeyIosSettings
+	}
+	objArr := o.([]interface{})
+	if len(objArr) == 0 || objArr[0] == nil {
+		return EmptyKeyIosSettings
+	}
+	obj := objArr[0].(map[string]interface{})
+	return &KeyIosSettings{
+		AllowAllBundleIds: dcl.Bool(obj["allow_all_bundle_ids"].(bool)),
+		AllowedBundleIds: 
tpgdclresource.ExpandStringArray(obj["allowed_bundle_ids"]), + } +} + +func flattenRecaptchaEnterpriseKeyIosSettings(obj *KeyIosSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_all_bundle_ids": obj.AllowAllBundleIds, + "allowed_bundle_ids": obj.AllowedBundleIds, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyTestingOptions(o interface{}) *KeyTestingOptions { + if o == nil { + return EmptyKeyTestingOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyTestingOptions + } + obj := objArr[0].(map[string]interface{}) + return &KeyTestingOptions{ + TestingChallenge: KeyTestingOptionsTestingChallengeEnumRef(obj["testing_challenge"].(string)), + TestingScore: dcl.Float64(obj["testing_score"].(float64)), + } +} + +func flattenRecaptchaEnterpriseKeyTestingOptions(obj *KeyTestingOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "testing_challenge": obj.TestingChallenge, + "testing_score": obj.TestingScore, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyWafSettings(o interface{}) *KeyWafSettings { + if o == nil { + return EmptyKeyWafSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyWafSettings + } + obj := objArr[0].(map[string]interface{}) + return &KeyWafSettings{ + WafFeature: KeyWafSettingsWafFeatureEnumRef(obj["waf_feature"].(string)), + WafService: KeyWafSettingsWafServiceEnumRef(obj["waf_service"].(string)), + } +} + +func flattenRecaptchaEnterpriseKeyWafSettings(obj *KeyWafSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "waf_feature": obj.WafFeature, + "waf_service": obj.WafService, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyWebSettings(o interface{}) 
*KeyWebSettings { + if o == nil { + return EmptyKeyWebSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyWebSettings + } + obj := objArr[0].(map[string]interface{}) + return &KeyWebSettings{ + IntegrationType: KeyWebSettingsIntegrationTypeEnumRef(obj["integration_type"].(string)), + AllowAllDomains: dcl.Bool(obj["allow_all_domains"].(bool)), + AllowAmpTraffic: dcl.Bool(obj["allow_amp_traffic"].(bool)), + AllowedDomains: tpgdclresource.ExpandStringArray(obj["allowed_domains"]), + ChallengeSecurityPreference: KeyWebSettingsChallengeSecurityPreferenceEnumRef(obj["challenge_security_preference"].(string)), + } +} + +func flattenRecaptchaEnterpriseKeyWebSettings(obj *KeyWebSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "integration_type": obj.IntegrationType, + "allow_all_domains": obj.AllowAllDomains, + "allow_amp_traffic": obj.AllowAmpTraffic, + "allowed_domains": obj.AllowedDomains, + "challenge_security_preference": obj.ChallengeSecurityPreference, + } + + return []interface{}{transformed} + +} + +func flattenRecaptchaEnterpriseKeyLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenRecaptchaEnterpriseKeyTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go 
b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go new file mode 100644 index 000000000000..9df2c9b9c5ca --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go @@ -0,0 +1,492 @@ +package recaptchaenterprise_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func TestAccRecaptchaEnterpriseKey_AndroidKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_AndroidKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_AndroidKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_IosKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + 
"random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_IosKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_IosKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_MinimalKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_MinimalKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WafKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_WafKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WebKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_WebKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_WebKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WebScoreKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_WebScoreKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_WebScoreKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccRecaptchaEnterpriseKey_AndroidKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + android_settings { + allow_all_package_names = true + allowed_package_names = [] + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.8 + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_AndroidKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + android_settings { + allow_all_package_names = false + allowed_package_names = ["com.android.application"] + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.8 + } + + labels = { + label-two = "value-two" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_IosKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + ios_settings { + allow_all_bundle_ids = true + allowed_bundle_ids = [] + } + + project = "%{project_name}" + + testing_options { + testing_score = 1 + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_IosKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + ios_settings { + allow_all_bundle_ids = false + allowed_bundle_ids = 
["com.companyname.appname"] + } + + project = "%{project_name}" + + testing_options { + testing_score = 1 + } + + labels = { + label-two = "value-two" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_MinimalKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "%{project_name}" + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + } + + labels = {} +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WafKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "%{project_name}" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + waf_settings { + waf_feature = "CHALLENGE_PAGE" + waf_service = "CA" + } + + web_settings { + integration_type = "INVISIBLE" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "%{project_name}" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + web_settings { + integration_type = "CHECKBOX" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + project = "%{project_name}" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score 
= 0.5 + } + + web_settings { + integration_type = "CHECKBOX" + allow_all_domains = false + allowed_domains = ["subdomain.example.com"] + challenge_security_preference = "SECURITY" + } + + labels = { + label-two = "value-two" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebScoreKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "%{project_name}" + + testing_options { + testing_score = 0.5 + } + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + allow_amp_traffic = false + allowed_domains = [] + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebScoreKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + project = "%{project_name}" + + testing_options { + testing_score = 0.5 + } + + web_settings { + integration_type = "SCORE" + allow_all_domains = false + allow_amp_traffic = true + allowed_domains = ["subdomain.example.com"] + } + + labels = { + label-two = "value-two" + } +} + + +`, context) +} + +func testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_recaptcha_enterprise_key" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &recaptchaenterprise.Key{ + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Name: 
dcl.StringOrNil(rs.Primary.Attributes["name"]), + } + + client := recaptchaenterprise.NewDCLRecaptchaEnterpriseClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetKey(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_recaptcha_enterprise_key still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml index ceb7cf403e09..b5b18e91c1cd 100644 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_recaptcha_enterprise_key' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'recaptchaenterprise.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Key' diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go new file mode 100644 index 000000000000..443deff1d889 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go @@ -0,0 +1,53 @@ +package recaptchaenterprise + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("RecaptchaEnterpriseKey", testSweepRecaptchaEnterpriseKey) +} + +func testSweepRecaptchaEnterpriseKey(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for RecaptchaEnterpriseKey") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLRecaptchaEnterpriseClient(config, config.UserAgent, "", 0) + err = client.DeleteAllKey(context.Background(), d["project"], isDeletableRecaptchaEnterpriseKey) + if err != nil { + return err + } + return nil +} + +func isDeletableRecaptchaEnterpriseKey(r *Key) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/canonicalize.go b/mmv1/third_party/terraform/tpgdclresource/canonicalize.go new file mode 100755 index 000000000000..0ca0729f21f9 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/canonicalize.go @@ -0,0 +1,857 @@ +package tpgdclresource + +import ( + "fmt" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "bitbucket.org/creachadair/stringset" + glog "github.com/golang/glog" +) + +var selfLinkIgnorableComponents = stringset.New("projects", "regions", "locations", "zones", "organizations", "compute", "v1", "v1beta1", "beta") + +// SelfLinkToSelfLink returns true if left and right are equivalent for selflinks. +// That means that they are piecewise equal, comparing components, allowing for +// certain elements to be dropped ("projects", "regions", etc.). It also allows +// any value to be present in the second-to-last field (where "instances" or +// "databases", etc, would be.) 
+func SelfLinkToSelfLink(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + left := *l + right := *r + + if lurl, err := url.Parse(left); err == nil { + left = lurl.EscapedPath() + } + if rurl, err := url.Parse(right); err == nil { + right = rurl.EscapedPath() + } + if strings.HasPrefix(left, "/") { + left = left[1:len(left)] + } + if strings.HasPrefix(right, "/") { + right = right[1:len(right)] + } + if strings.HasSuffix(left, right) || strings.HasSuffix(right, left) { + return true + } + lcomp := strings.Split(left, "/") + rcomp := strings.Split(right, "/") + li := 0 + ri := 0 + for li < len(lcomp) && ri < len(rcomp) { + switch { + case lcomp[li] == rcomp[ri]: + li++ + ri++ + case selfLinkIgnorableComponents.Contains(lcomp[li]): + li++ + case selfLinkIgnorableComponents.Contains(rcomp[ri]): + ri++ + // The second-to-last element in a long-form self-link contains the + // name of the resource. The name of the resource might be anything, + // rather than keep a list of all resources, we will just ignore + // the second-to-last field if one argument is exactly one remaining + // field longer than the other. + case len(lcomp) == li+2 && len(rcomp) == ri+1: + li++ + case len(rcomp) == ri+2 && len(lcomp) == li+1: + ri++ + default: + return false + } + } + return true +} + +// StringCanonicalize checks canonicalization for strings. It matches self-links using NameToSelfLink. +func StringCanonicalize(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + left := *l + right := *r + + if left == right { + return true + } + + if IsPartialSelfLink(left) || IsPartialSelfLink(right) || IsSelfLink(left) || IsSelfLink(right) { + return NameToSelfLink(l, r) + } + + return false +} + +// StringArrayCanonicalize checks canonicalization for arrays of strings. It matches self-links using NameToSelfLink. 
+func StringArrayCanonicalize(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i := range l { + if !StringCanonicalize(&l[i], &r[i]) { + return false + } + } + return true +} + +// BoolCanonicalize checks canonicalization for booleans. +func BoolCanonicalize(l, r *bool) bool { + if l == nil && r == nil { + return true + } + if l != nil && r == nil { + left := *l + return left == false + } + + if r != nil && l == nil { + right := *r + return right == false + } + + left := *l + right := *r + + return left == right +} + +// NameToSelfLink returns true if left and right are equivalent for Names / SelfLinks. +// It allows all the deviations that SelfLinkToSelfLink allows, plus it allows one +// of the values to simply be the last element of the other value. +func NameToSelfLink(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + left := *l + right := *r + + if left == right { + return true + } + lcomp := strings.Split(left, "/") + rcomp := strings.Split(right, "/") + if len(lcomp) > 1 && len(rcomp) > 1 { + return SelfLinkToSelfLink(&left, &right) + } + if len(lcomp) > 1 && lcomp[len(lcomp)-1] == right { + return true + } + if len(rcomp) > 1 && rcomp[len(rcomp)-1] == left { + return true + } + return false +} + +// PartialSelfLinkToSelfLink returns true if left and right are equivalent for SelfLinks and partial +// SelfLinks. 
It allows all the deviations that SelfLink allows, except that it works +// backwards, and returns true when one or the other is empty - in that sense, it allows whatever +// specification, starting from the most-specific +func PartialSelfLinkToSelfLink(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + left := *l + right := *r + + if left == "" && right == "" { + return true + } + if left == "" || right == "" { + return false + } + if NameToSelfLink(&left, &right) { + return true + } + lcomp := strings.Split(left, "/") + rcomp := strings.Split(right, "/") + li := len(lcomp) - 1 + ri := len(rcomp) - 1 + for li >= 0 && ri >= 0 { + switch { + case lcomp[li] == rcomp[ri]: + li-- + ri-- + case selfLinkIgnorableComponents.Contains(lcomp[li]): + li-- + case selfLinkIgnorableComponents.Contains(rcomp[ri]): + ri-- + // As in SelfLinkToSelfLink, we permit any value in the second-to-last field + // for the value which is longer. + case len(lcomp) == li+2 && len(rcomp) == ri+2 && li > ri: + li-- + case len(lcomp) == li+2 && len(rcomp) == ri+2 && ri > li: + ri-- + default: + return false + } + + } + return true +} + +// PartialSelfLinkToSelfLinkArray returns true if left and right are all equivalent for SelfLinks. +func PartialSelfLinkToSelfLinkArray(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i := range l { + if !PartialSelfLinkToSelfLink(&l[i], &r[i]) { + return false + } + } + return true +} + +func WithoutTrailingDotArrayInterface(l, r any) bool { + lVal, _ := l.([]string) + rVal, _ := r.([]string) + return WithoutTrailingDotArray(lVal, rVal) +} + +// WithoutTrailingDotArray returns true if WithoutTrailingDot returns true for each +// pair of elements in the lists. 
+func WithoutTrailingDotArray(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i, lv := range l { + if !WithoutTrailingDot(lv, r[i]) { + return false + } + } + return true +} + +// WithoutTrailingDot returns true if the arguments are equivalent ignoring a final period. +// This is useful for comparing absolute & relative domain names. +func WithoutTrailingDot(l, r string) bool { + return strings.TrimSuffix(l, ".") == strings.TrimSuffix(r, ".") +} + +// QuoteAndCaseInsensitiveString returns true if the arguments are considered equal ignoring case +// and quotedness (e.g. "foo" and foo are equivalent). +func QuoteAndCaseInsensitiveString(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + if uq, err := strconv.Unquote(*l); err == nil { + l = &uq + } + if uq, err := strconv.Unquote(*r); err == nil { + r = &uq + } + return CaseInsensitiveString(l, r) +} + +// QuoteAndCaseInsensitiveStringArray returns true if the arguments are considered equal ignoring case +// and quotedness (e.g. "foo" and foo are equivalent), but including ordering. +func QuoteAndCaseInsensitiveStringArray(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i := range l { + if uq, err := strconv.Unquote(l[i]); err == nil { + l[i] = uq + } + if uq, err := strconv.Unquote(r[i]); err == nil { + r[i] = uq + } + } + return CaseInsensitiveStringArray(l, r) +} + +// CaseInsensitiveStringArray returns true if the arguments are considered equal ignoring case, +// but including ordering. +func CaseInsensitiveStringArray(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i, lv := range l { + if !strings.EqualFold(lv, r[i]) { + return false + } + } + return true +} + +// CaseInsensitiveString returns true if the arguments are considered equal ignoring case. 
+func CaseInsensitiveString(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + return strings.EqualFold(*l, *r) +} + +// IsZeroValue returns true if the argument is considered empty/unset. +func IsZeroValue(v any) bool { + if t, ok := v.(time.Time); ok { + return t.IsZero() + } + val := reflect.ValueOf(v) + return !val.IsValid() || !reflect.Indirect(val).IsValid() || ((val.Kind() == reflect.Interface || + val.Kind() == reflect.Chan || + val.Kind() == reflect.Func || + val.Kind() == reflect.Ptr || + val.Kind() == reflect.Map || + val.Kind() == reflect.Slice) && val.IsNil()) +} + +// SliceEquals takes in two slices of strings and checks their equality +func SliceEquals(v []string, q []string) bool { + if len(v) != len(q) { + return false + } + + for i := 0; i < len(v); i++ { + if v[i] != q[i] { + return false + } + } + return true +} + +// MapEquals returns if two maps are equal, while ignoring any keys with ignorePrefixes. +func MapEquals(di, ai any, ignorePrefixes []string) bool { + d, ok := di.(map[string]string) + if !ok { + return false + } + + a, ok := ai.(map[string]string) + if !ok { + return false + } + + for k, v := range d { + if isIgnored(k, ignorePrefixes) { + continue + } + + av, ok := a[k] + if !ok { + return false + } + if !reflect.DeepEqual(v, av) { + return false + } + } + + for k, v := range a { + if isIgnored(k, ignorePrefixes) { + continue + } + + dv, ok := d[k] + if !ok { + return false + } + if !reflect.DeepEqual(v, dv) { + return false + } + } + + return true + +} + +// isIgnored returns true if this prefix should be ignored. +func isIgnored(v string, ignoredPrefixes []string) bool { + for _, p := range ignoredPrefixes { + if strings.Contains(v, p) { + return true + } + } + return false +} + +// CompareStringSets returns two slices of strings, +// one of strings in set a but not b, and one of strings in set b but not a. 
+func CompareStringSets(a, b []string) (toAdd, toRemove []string) { + for _, item := range a { + inB := false + for _, i2 := range b { + if i2 == item { + inB = true + } + } + if !inB { + toAdd = append(toAdd, item) + } + } + for _, item := range b { + inA := false + for _, i2 := range a { + if i2 == item { + inA = true + } + } + if !inA { + toRemove = append(toRemove, item) + } + } + return +} + +// WrapStringsWithKey returns a slice of maps with one key (the 'key' argument) +// and one value (each value in 'values'). +// e.g. ("foo", ["bar", "baz", "qux"]) => [{"foo": "bar"}, {"foo": "baz"}, {"foo": "qux"}]. +// Useful for, for instance, +// https://cloud.google.com/compute/docs/reference/rest/v1/targetPools/addHealthCheck +func WrapStringsWithKey(key string, values []string) ([]map[string]string, error) { + r := make([]map[string]string, len(values)) + for i, v := range values { + r[i] = map[string]string{key: v} + } + return r, nil +} + +// FloatSliceEquals takes in two slices of float64s and checks their equality +func FloatSliceEquals(v []float64, q []float64) bool { + if len(v) != len(q) { + return false + } + + for i := 0; i < len(v); i++ { + if v[i] != q[i] { + return false + } + } + return true +} + +// IntSliceEquals takes in two slices of int64s and checks their equality +func IntSliceEquals(v []int64, q []int64) bool { + if len(v) != len(q) { + return false + } + + for i := 0; i < len(v); i++ { + if v[i] != q[i] { + return false + } + } + return true +} + +// StringSliceEquals returns true if v, q arrays of strings are equal according to StringEquals. +func StringSliceEquals(v, q []string) bool { + if len(v) != len(q) { + return false + } + + for i := 0; i < len(v); i++ { + if !StringEquals(&v[i], &q[i]) { + return false + } + } + return true +} + +// UnorderedStringSliceEquals returns true if a, b contains same set of elements irrespective of their ordering. 
+func UnorderedStringSliceEquals(a, b []string) bool { + aMap := make(map[string]int) + bMap := make(map[string]int) + + for _, val := range a { + aMap[val]++ + } + for _, val := range b { + bMap[val]++ + } + + if len(aMap) != len(bMap) { + return false + } + + for k, v := range aMap { + bv, ok := bMap[k] + if !ok { + return false + } + if v != bv { + return false + } + } + + return true +} + +// StringSliceEqualsWithSelfLink returns true if v, q arrays of strings are equal according to StringEqualsWithSelfLink +func StringSliceEqualsWithSelfLink(v, q []string) bool { + if len(v) != len(q) { + return false + } + + for i := 0; i < len(v); i++ { + if !StringEqualsWithSelfLink(&v[i], &q[i]) { + return false + } + } + return true +} + +// DeriveFieldArray calls DeriveField on each entry in the provided slice. The final +// entry in the input variadic argument can be a slice, and those values will be replaced +// by the values in the provided current value. +func DeriveFieldArray(pattern string, cVal []string, fs ...any) ([]string, error) { + var s []string + var allFs []*string + for _, f := range fs[:len(fs)-1] { + allFs = append(allFs, f.(*string)) + } + for _, cv := range cVal { + glog.Infof("deriving %q from %q, %v", pattern, cv, append(allFs, &cv)) + sval, err := DeriveField(pattern, &cv, append(allFs, &cv)...) + if err != nil { + return nil, err + } + if sval == nil { + return nil, fmt.Errorf("got nil back from DeriveField for %q", cv) + } + s = append(s, *sval) + glog.Infof("derived %q", *sval) + } + return s, nil +} + +// DeriveField deals with the outgoing portion of derived fields. The derived fields' +// inputs might be in any form - for instance, a derived name field might be set to +// project/region/name, projects/project/regions/region/objects/name, or just name. +// This function returns the best reasonable guess at the user's intent. If the current +// value (cVal) matches any of those, it will return the current value. 
If it doesn't, +// it will be ignored (even if nil). +func DeriveField(pattern string, cVal *string, fs ...*string) (*string, error) { + var currentValue string + // interface{} for fmt.Sprintf. + fields := make([]any, len(fs)) + if cVal == nil { + // might still be doable from "fields"! + currentValue = "" + } else { + currentValue = *cVal + } + for i, f := range fs { + if IsEmptyValueIndirect(f) { + if currentValue == "" { + // This field may not be required, so we shouldn't error out. + // Erroring out would cause the DCL to stop if this field isn't set (which it might not be!) + return nil, nil + } + // might still be doable from currentValue + fields[i] = "" + } else { + fields[i] = *f + } + } + + patternParts := strings.Split(pattern, "/") + valueParts := strings.Split(currentValue, "/") + + // currentValue may be a full self-link, so we need to filter out unnecessary beginning parts. + if len(valueParts) > len(patternParts) { + for index, valuePart := range valueParts { + if valuePart == patternParts[0] { + valueParts = valueParts[index:len(valueParts)] + break + } + } + } + + if len(patternParts) == len(valueParts) { + // check if the current value fits the pattern. + match := true + for i := range patternParts { + if patternParts[i] != "%s" && valueParts[i] != patternParts[i] { + match = false + break + } + } + if match { + return ¤tValue, nil + } + } + if len(valueParts) == strings.Count(pattern, "%s") { + iParts := make([]any, len(valueParts)) + for i, s := range valueParts { + iParts[i] = s + } + value := fmt.Sprintf(pattern, iParts...) + return &value, nil + } + value := fmt.Sprintf(pattern, fields...) + return &value, nil +} + +// IsEmptyValueIndirect returns true if the value provided is "empty", according +// to the golang rules. This corresponds to whether the value should be sent by the +// client if the existing value is nil - it is useful for diffing a response against a provided +// value. 
The "Indirect" refers to the fact that this method returns correct +// results even if the provided value is a pointer. +func IsEmptyValueIndirect(i any) bool { + if i == nil { + return true + } + + rt := reflect.TypeOf(i) + switch rt.Kind() { + case reflect.Slice: + return reflect.ValueOf(i).Len() == 0 + case reflect.Array: + return rt.Len() == 0 + case reflect.Map: + return len(reflect.ValueOf(i).MapKeys()) == 0 + } + + iv := reflect.Indirect(reflect.ValueOf(i)) + + // All non-nil bool values are not empty. + if iv.Kind() == reflect.Bool { + return false + } + + if !iv.IsValid() || iv.IsZero() { + return true + } + if hasEmptyStructField(i) { + return true + } + return false +} + +// hasEmptyStructField returns true if the provided value is a struct +// with an unexported field called 'empty', and that value is a boolean, +// and that boolean is true. This is useful when a user needs to explicitly +// set their intention that a value be empty. +func hasEmptyStructField(i any) bool { + iv := reflect.Indirect(reflect.ValueOf(i)) + if !iv.IsValid() { + return false + } + if iv.Kind() == reflect.Struct { + if iv.FieldByName("empty").IsValid() && iv.FieldByName("empty").Bool() { + return true + } + } + return false +} + +// MatchingSemverInterface matches two interfaces according to MatchingSemver +func MatchingSemverInterface(lp, rp any) bool { + if lp == nil && rp == nil { + return true + } + if lp == nil || rp == nil { + return false + } + + lpVal, _ := lp.(*string) + rpVal, _ := rp.(*string) + return MatchingSemver(lpVal, rpVal) +} + +// MatchingSemver returns whether the two strings should be considered equivalent +// according to semver rules. If one provides more detail than the other, this is +// acceptable, as long as both are consistent in the detail they do provide. +// For instance, 1.16 == 1.16.4 != 1.15. 
+func MatchingSemver(lp, rp *string) bool { + if lp == nil && rp == nil { + return true + } + if lp == nil || rp == nil { + return false + } + l := *lp + r := *rp + if l == "latest" || r == "latest" { + return true + } + + // If default version chosen, we should assume API returned the default version. + if l == "-" { + return true + } + + ld := strings.Split(l, "-") + rd := strings.Split(r, "-") + if ld[0] == rd[0] { + return true + } + if len(ld) == 2 && len(rd) == 2 { + // nonmatching post-dash version. + return false + } + ldo := strings.Split(ld[0], ".") + rdo := strings.Split(rd[0], ".") + + for i := 0; i < len(ldo) && i < len(rdo); i++ { + if ldo[i] != rdo[i] { + return false + } + } + return true +} + +// DeriveFromPattern attempts to achieve the same end goal as DeriveField +// but by using regular expressions rather than assumptions about the +// format of the inputs based on the number of `/`. This is important for fields that allow `/` +// characters in their names. +func DeriveFromPattern(pattern string, cVal *string, fs ...*string) (*string, error) { + var currentValue string + if cVal == nil { + // might still be doable from "fields"! + currentValue = "" + } else { + currentValue = *cVal + } + + if !strings.HasSuffix(pattern, "%s") { + // If the pattern does not end with %s we cannot assume anything past the last expected + // `/` character is part of a name + return nil, fmt.Errorf("pattern did not end with %%s, it does not work with the current implementation %v", pattern) + } + // Build regexp from pattern + regex, err := regexFromPattern(pattern) + if err != nil { + return nil, err + } + + if matches := regex.FindStringSubmatch(currentValue); len(matches) > 0 { + // Found a match to the pattern, use the capture groups to populate the pattern + s := make([]any, len(matches)) + for i, v := range matches { + s[i] = v + } + value := fmt.Sprintf(pattern, s[1:]...) 
+ return &value, nil + } + + // Did not find a match to the pattern, use the fields to populate the pattern + fields := make([]any, len(fs)) + + for i, f := range fs { + if f == nil { + // This field may not be required, so we shouldn't error out. + // Erroring out would cause the DCL to stop if this field isn't set (which it might not be!) + return nil, nil + } + fields[i] = *f + } + value := fmt.Sprintf(pattern, fields...) + return &value, nil +} + +func regexFromPattern(pattern string) (*regexp.Regexp, error) { + // Replace string formatting with capture groups except for the last one + // the last one will capture all trailing values + re := strings.Replace(pattern, "%s", "([^/]+)", strings.Count(pattern, "%s")-1) + // Wildcard capture at the end, allows for the last value to include `/` characters + re = strings.ReplaceAll(re, "%s", "(.+)") + return regexp.Compile(re) +} + +// NameFromSelfLink takes in a self link string and returns the name. +func NameFromSelfLink(sl *string) (*string, error) { + if sl == nil { + return nil, nil + } + curNameParts := strings.Split(*sl, "/") + val := curNameParts[len(curNameParts)-1] + return &val, nil +} + +// StringEqualsWithSelfLink returns true if these two strings are equal. +// If these functions are self links, they'll do self-link comparisons. +func StringEqualsWithSelfLink(l, r *string) bool { + if l == nil && r == nil { + return true + } + + if l == nil || r == nil { + return false + } + + left := *l + right := *r + + if IsSelfLink(left) || IsSelfLink(right) || IsPartialSelfLink(left) || IsPartialSelfLink(right) { + lp := strings.Split(left, "/") + rp := strings.Split(right, "/") + return lp[len(lp)-1] == rp[len(rp)-1] + } else { + return left == right + } +} + +// StringEquals returns true if these two strings are equal. 
+func StringEquals(l, r *string) bool { + if l == nil && r == nil { + return true + } + + if l == nil || r == nil { + return false + } + + left := *l + right := *r + + return left == right +} + +// IsPartialSelfLink returns true if this string represents a partial self link. +func IsPartialSelfLink(s string) bool { + return strings.HasPrefix(s, "projects/") || strings.HasPrefix(s, "organizations/") || strings.HasPrefix(s, "folders/") || strings.HasPrefix(s, "billingAccounts/") || strings.HasPrefix(s, "tagKeys/") || strings.HasPrefix(s, "tagValues/") || strings.HasPrefix(s, "groups/") +} + +// IsSelfLink returns true if this string represents a full self link. +func IsSelfLink(s string) bool { + r := regexp.MustCompile(`(https:\/\/)?(www\.)?([a-z]*)?googleapis.com\/`) + return r.MatchString(s) +} + +// ValueShouldBeSent returns if a value should be sent as part of the JSON request. +func ValueShouldBeSent(v any) bool { + if v == nil { + return false + } + + iv := reflect.Indirect(reflect.ValueOf(v)) + + // All booleans should be sent. + if iv.Kind() == reflect.Bool { + return true + } + + if !iv.IsValid() || iv.IsZero() { + return false + } + + return !IsEmptyValueIndirect(v) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/client.go b/mmv1/third_party/terraform/tpgdclresource/client.go new file mode 100755 index 000000000000..2ed2de5b7820 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/client.go @@ -0,0 +1,7 @@ +package tpgdclresource + +// Scopes defines the common OAuth scopes needed for clients making GCP API calls. 
+var Scopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", +} diff --git a/mmv1/third_party/terraform/tpgdclresource/config.go b/mmv1/third_party/terraform/tpgdclresource/config.go new file mode 100755 index 000000000000..a726ec5644b1 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/config.go @@ -0,0 +1,573 @@ +package tpgdclresource + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "net/http/httputil" + "regexp" + "strings" + "time" + + // glog aliased import is necessary since these packages will be open-sourced + // and that is the public name of the google logging package. + glog "github.com/golang/glog" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +const ua = "DeclarativeClientLib/0.0.1" + +const defaultTimeout = 15 * time.Minute + +// ConfigOption is used to functionally configure Configs. +type ConfigOption func(*Config) + +// Config is used to enclose the credentials and http client used to make +// requests to GCP APIs. +type Config struct { + RetryProvider RetryProvider + codeRetryability map[int]Retryability + timeout time.Duration + header http.Header + clientOptions []option.ClientOption + userAgent string + contentType string + queryParams map[string]string + Logger ContextLogger + BasePath string + billingProject string + userOverrideProject bool +} + +// Retryability holds the details for one error code to determine if it is retyable. +// The regex field is compiled for use in error handling. +// To be retryable, the boolean must be true and the regex must match. +type Retryability struct { + Retryable bool + Pattern string + regex *regexp.Regexp + Timeout time.Duration +} + +// UserAgent returns the user agent for the config, which will always include the +// declarative SDK name and version. 
+func (c *Config) UserAgent() string { + if c.userAgent != "" { + return fmt.Sprintf("%s %s", c.userAgent, ua) + } + return ua +} + +// NewConfig creates a Config object. +func NewConfig(o ...ConfigOption) *Config { + retryable := Retryability{ + Retryable: true, + regex: regexp.MustCompile(".*"), + Timeout: defaultTimeout, + } + nonretryable := Retryability{Retryable: false} + c := &Config{ + codeRetryability: map[int]Retryability{ + 400: Retryability{ + Retryable: true, + regex: regexp.MustCompile("The resource '[-/a-zA-Z0-9]*' is not ready"), + Timeout: defaultTimeout, + }, + 403: Retryability{ + Retryable: true, + regex: regexp.MustCompile(".*API request rate quota.*"), + Timeout: defaultTimeout, + }, + 404: nonretryable, + 409: nonretryable, + 429: retryable, + 500: retryable, + 502: retryable, + 503: retryable, + }, + contentType: "application/json", + queryParams: map[string]string{"alt": "json"}, + Logger: ContextLogger{ + logger: DefaultLogger(LoggerInfo), + }, + RetryProvider: &BackoffRetryProvider{}, + } + + for _, opt := range o { + opt(c) + } + + return c +} + +// Clone returns a copy of an existing Config with optional new values. +func (c *Config) Clone(o ...ConfigOption) *Config { + result := &Config{ + RetryProvider: c.RetryProvider, + codeRetryability: c.codeRetryability, + timeout: c.timeout, + clientOptions: c.clientOptions, + userAgent: c.userAgent, + contentType: c.contentType, + queryParams: c.queryParams, + Logger: c.Logger, + BasePath: c.BasePath, + billingProject: c.billingProject, + userOverrideProject: c.userOverrideProject, + } + + if c.header != nil { + result.header = c.header.Clone() + } + + for _, opt := range o { + opt(result) + } + + return result +} + +// TimeoutOr returns a timeout for this config. If WithTimeout() was called, that timeout +// is used; if WithTimeout() was not called and a value is provided with `t`, that is used. 
+// Otherwise the default timeout is returned; +func (c *Config) TimeoutOr(t time.Duration) time.Duration { + if c.timeout != 0 { + return c.timeout + } else if t != 0 { + return t + } + return defaultTimeout +} + +type loggingTransport struct { + underlyingTransport http.RoundTripper + logger ContextLogger +} + +func (t loggingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + shouldLogRequest, err := ShouldLogRequest(req.Context()) + if err != nil { + t.logger.Infof("Error fetching ShouldLogRequest value: %v", err) + } + reqDump, err := httputil.DumpRequestOut(req, true) + randString := RandomString(5) + if err == nil { + if shouldLogRequest { + t.logger.InfoWithContextf(req.Context(), "Google API Request: (id %s)\n-----------[REQUEST]----------\n%s\n-------[END REQUEST]--------", randString, strings.ReplaceAll(string(reqDump), "\r\n", "\n")) + } + } else { + t.logger.WarningWithContextf(req.Context(), "Failed to make request (id %s): %s", randString, err) + } + resp, err := t.underlyingTransport.RoundTrip(req) + if err == nil { + respDump, err := httputil.DumpResponse(resp, true) + if err == nil { + respDumpStr := string(respDump) + if shouldLogRequest { + t.logger.InfoWithContextf(req.Context(), "Google API Response: (id %s) \n-----------[RESPONSE]----------\n%s\n-------[END RESPONSE]--------", randString, strings.ReplaceAll(respDumpStr, "\r\n", "\n")) + } else if resp.StatusCode >= 400 || strings.Contains(respDumpStr, "error") { + t.logger.InfoWithContextf(req.Context(), "Google API Request: (id %s)\n-----------[REQUEST]----------\n%s\n-------[END REQUEST]--------", randString, strings.ReplaceAll(string(reqDump), "\r\n", "\n")) + t.logger.InfoWithContextf(req.Context(), "Google API Response: (id %s) \n-----------[RESPONSE]----------\n%s\n-------[END RESPONSE]--------", randString, strings.ReplaceAll(respDumpStr, "\r\n", "\n")) + } + } else { + t.logger.WarningWithContextf(req.Context(), "Failed to parse response (id %s): %s", randString, err) 
+ } + } else { + t.logger.WarningWithContextf(req.Context(), "Failed to get response (id %s): %s", randString, err) + } + return resp, err +} + +// ApplyOption is an option that is accepted by Apply() functions. +type ApplyOption interface { + Apply(*ApplyOpts) +} + +// ApplyOpts refers to options that are taken in the apply function. +type ApplyOpts struct { + params []LifecycleParam + stateHint Resource +} + +type lifecycleParamOption struct { + param LifecycleParam +} + +func (l lifecycleParamOption) Apply(o *ApplyOpts) { + o.params = append(o.params, l.param) +} + +// WithLifecycleParam allows a user to specify the proper lifecycle params. +func WithLifecycleParam(d LifecycleParam) ApplyOption { + return lifecycleParamOption{param: d} +} + +// FetchLifecycleParams returns the list of lifecycle params. +func FetchLifecycleParams(c []ApplyOption) []LifecycleParam { + var o ApplyOpts + for _, p := range c { + p.Apply(&o) + } + return o.params +} + +type stateHint struct { + state Resource +} + +func (s stateHint) Apply(o *ApplyOpts) { + o.stateHint = s.state +} + +// WithStateHint takes in a resource which will be used in place of the applied +// resource any time the current configuration of the resource is relevant. +// For instance, if an identity field will change, passing a state hint will ensure +// that the current resource is fetched (and possibly deleted). +func WithStateHint(r Resource) ApplyOption { + return stateHint{state: r} +} + +// FetchStateHint returns either nil or a Resource representing the pre-apply state. +func FetchStateHint(c []ApplyOption) Resource { + var o ApplyOpts + for _, p := range c { + p.Apply(&o) + } + return o.stateHint +} + +// WithRetryProvider allows a user to override default exponential backoff retry behavior. +func WithRetryProvider(r RetryProvider) ConfigOption { + return func(c *Config) { + c.RetryProvider = r + } +} + +// WithCodeRetryability allows a user to add additional retryable or non-retryable error codes. 
+// Each error code is mapped to a regexp which must match the error message to be retryable. +func WithCodeRetryability(cr map[int]Retryability) ConfigOption { + return func(c *Config) { + for code, retryability := range cr { + // Non-retryable errors do not need a regex to check against. + var re *regexp.Regexp + if retryability.Retryable { + re = regexp.MustCompile(retryability.Pattern) + } + // If timeout for this retryable error was not specified, assume default. + to := defaultTimeout + if retryability.Timeout > 0 { + to = retryability.Timeout + } + c.codeRetryability[code] = Retryability{ + Retryable: retryability.Retryable, + regex: re, + Timeout: to, + } + } + } +} + +// WithTimeout allows a user to override default operation timeout. +func WithTimeout(to time.Duration) ConfigOption { + return func(c *Config) { + c.timeout = to + } +} + +// WithLogger allows a user to specify a custom logger. +func WithLogger(l Logger) ConfigOption { + return func(c *Config) { + c.Logger.logger = l + } +} + +// WithContextLogger allows a user to specify a custom context logger. +func WithContextLogger(l ContextLogger) ConfigOption { + return func(c *Config) { + c.Logger = l + } +} + +// WithBasePath allows a base path to be overridden. +func WithBasePath(b string) ConfigOption { + return func(c *Config) { + c.BasePath = b + } +} + +// WithHeader allows aribitrary HTTP headers to be addded to requests. Not all headers +// (e.g., "Content-Type") can be overridden. To set the User-Agent header, use WithUserAgent(). +func WithHeader(header, value string) ConfigOption { + return func(c *Config) { + if c.header == nil { + c.header = make(http.Header) + } + c.header.Add(header, value) + } +} + +// WithUserAgent allows a user to specify a custom user-agent. +func WithUserAgent(ua string) ConfigOption { + return func(c *Config) { + c.userAgent = ua + } +} + +// WithContentType allows a user to override the default Content-Type header. 
+func WithContentType(ct string) ConfigOption { + return func(c *Config) { + c.contentType = ct + } +} + +// WithQueryParams allows a user to override the default query parameters. +func WithQueryParams(ps map[string]string) ConfigOption { + return func(c *Config) { + c.queryParams = ps + } +} + +// WithAPIKey returns a ConfigOption that specifies an API key to be used as the basis for authentication. +func WithAPIKey(apiKey string) ConfigOption { + return func(c *Config) { + c.clientOptions = append(c.clientOptions, option.WithAPIKey(apiKey)) + } +} + +// WithClientCertSource returns a ConfigOption that specifies a callback function for obtaining a TLS client certificate. +func WithClientCertSource(s option.ClientCertSource) ConfigOption { + return func(c *Config) { + c.clientOptions = append(c.clientOptions, option.WithClientCertSource(s)) + } +} + +// WithCredentials returns a ConfigOption that authenticates API calls using a caller-supplier Credentials struct. +func WithCredentials(creds *google.Credentials) ConfigOption { + return func(c *Config) { + c.clientOptions = append(c.clientOptions, option.WithCredentials(creds)) + } +} + +// WithCredentialsFile returns a ConfigOption that authenticates API calls with the given service account or refresh token JSON credentials file. +func WithCredentialsFile(filename string) ConfigOption { + return func(c *Config) { + c.clientOptions = append(c.clientOptions, option.WithCredentialsFile(filename)) + } +} + +// WithCredentialsJSON returns a ConfigOption that authenticates API calls with the given service account or refresh token JSON credentials. +func WithCredentialsJSON(p []byte) ConfigOption { + return func(c *Config) { + c.clientOptions = append(c.clientOptions, option.WithCredentialsJSON(p)) + } +} + +// WithHTTPClient returns a ConfigOption that specifies the HTTP client to use as the basis of communications. +// When used, the WithHTTPClient option takes precedent over all other supplied authentication options. 
+func WithHTTPClient(client *http.Client) ConfigOption { + return func(c *Config) { + c.clientOptions = append(c.clientOptions, option.WithHTTPClient(client)) + } +} + +// WithBillingProject returns a ConfigOption that specifies the user override project. +// This will be used to set X-Goog-User-Project on API calls. +// This option will be ignored unless WithUserProjectOverride is also used. +func WithBillingProject(project string) ConfigOption { + return func(c *Config) { + c.billingProject = project + } +} + +// WithUserProjectOverride returns a ConfigOption that turns on WithUserProjectOverride. +// This will send the X-Goog-User-Project on API calls. +func WithUserProjectOverride() ConfigOption { + return func(c *Config) { + c.userOverrideProject = true + } +} + +// Logger is an interface for logging requests and responses. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Info(args ...any) + Infof(format string, args ...any) + Warningf(format string, args ...any) + Warning(args ...any) +} + +// ContextLogger is the internal logger implementation. +type ContextLogger struct { + logger Logger +} + +// LoggerLevel is the most basic level that a logger should print. +// Anything at this level or more severe will be printed by this logger. +type LoggerLevel int32 + +const ( + // Fatal will print only Fatal logs. + Fatal LoggerLevel = iota + // Error will print Error and all Fatal logs. + Error + // Warning will print Warning and all Error logs. + Warning + // LoggerInfo will print Info and all Warning logs. + LoggerInfo +) + +// DefaultLogger returns the default logger for the Declarative Client Library. +func DefaultLogger(level LoggerLevel) Logger { + return glogger{level: level} +} + +type glogger struct { + level LoggerLevel +} + +// Fatal records Fatal errors. +func (l glogger) Fatal(args ...any) { + if l.level >= Fatal { + glog.Fatal(args...) + } +} + +// Fatalf records Fatal errors with added arguments. 
+func (l glogger) Fatalf(format string, args ...any) { + if l.level >= Fatal { + glog.Fatalf(format, HandleLogArgs(args...)...) + } +} + +// Info records Info errors. +func (l glogger) Info(args ...any) { + if l.level >= LoggerInfo { + glog.Info(args...) + } +} + +// Infof records Info errors with added arguments. +func (l glogger) Infof(format string, args ...any) { + if l.level >= LoggerInfo { + glog.Infof(format, HandleLogArgs(args...)...) + } +} + +// Warningf records Warning errors with added arguments. +func (l glogger) Warningf(format string, args ...any) { + if l.level >= Warning { + glog.Warningf(format, HandleLogArgs(args...)...) + } +} + +// Warning records Warning errors. +func (l glogger) Warning(args ...any) { + if l.level >= Warning { + glog.Warning(args...) + } +} + +// Fatal records Fatal errors. +func (l ContextLogger) Fatal(args ...any) { + l.logger.Fatal(args...) +} + +// Fatalf records Fatal errors with added arguments. +func (l ContextLogger) Fatalf(format string, args ...any) { + l.logger.Fatalf(format, HandleLogArgs(args...)...) +} + +// Info records Info errors. +func (l ContextLogger) Info(args ...any) { + l.logger.Info(args...) +} + +// Infof records Info errors with added arguments. +func (l ContextLogger) Infof(format string, args ...any) { + l.logger.Infof(format, HandleLogArgs(args...)...) +} + +// Warningf records Warning errors with added arguments. +func (l ContextLogger) Warningf(format string, args ...any) { + l.logger.Warningf(format, HandleLogArgs(args...)...) +} + +// Warning records Warning errors. +func (l ContextLogger) Warning(args ...any) { + l.logger.Warning(args...) +} + +// FatalWithContext records Fatal errors with context values. +func (l ContextLogger) FatalWithContext(ctx context.Context, args ...any) { + args = append([]any{ConstructLogPrefixFromContext(ctx)}, args...) + l.Fatal(args...) +} + +// FatalWithContextf records Fatal errors with added arguments with context values. 
+func (l ContextLogger) FatalWithContextf(ctx context.Context, format string, args ...any) { + format = fmt.Sprintf("%s %s", ConstructLogPrefixFromContext(ctx), format) + l.Fatalf(format, args...) +} + +// InfoWithContext records Info errors with context values. +func (l ContextLogger) InfoWithContext(ctx context.Context, args ...any) { + args = append([]any{ConstructLogPrefixFromContext(ctx)}, args...) + l.Info(args...) +} + +// InfoWithContextf records Info errors with added arguments with context values. +func (l ContextLogger) InfoWithContextf(ctx context.Context, format string, args ...any) { + format = fmt.Sprintf("%s %s", ConstructLogPrefixFromContext(ctx), format) + l.Infof(format, args...) +} + +// WarningWithContextf records Warning errors with added arguments with context values. +func (l ContextLogger) WarningWithContextf(ctx context.Context, format string, args ...any) { + format = fmt.Sprintf("%s %s", ConstructLogPrefixFromContext(ctx), format) + l.Warningf(format, HandleLogArgs(args...)...) +} + +// WarningWithContext records Warning errors with context values. +func (l ContextLogger) WarningWithContext(ctx context.Context, args ...any) { + args = append([]any{ConstructLogPrefixFromContext(ctx)}, args...) + l.Warning(args...) +} + +// HandleLogArgs ensures that pointer arguments are dereferenced well. +func HandleLogArgs(args ...any) []any { + a := make([]any, len(args)) + for i, v := range args { + if s, ok := v.(*string); ok && s != nil { + a[i] = *s + } else { + a[i] = v + } + } + return a +} + +// ConstructLogPrefixFromContext constructs log prefix from info in context +func ConstructLogPrefixFromContext(ctx context.Context) string { + return fmt.Sprintf("[RequestID:%s] ", APIRequestID(ctx)) +} + +// RandomString generates a random alpha-numeric string of input length. 
+func RandomString(length int) string { + charset := "abcdefghijklmnopqrstuvwxyz0123456789" + var seededRand *rand.Rand = rand.New( + rand.NewSource(time.Now().UnixNano())) + + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +// CreateAPIRequestID creates a random APIRequestID. +func CreateAPIRequestID() string { + return RandomString(8) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/context.go b/mmv1/third_party/terraform/tpgdclresource/context.go new file mode 100755 index 000000000000..9a79dd9f0bf2 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/context.go @@ -0,0 +1,56 @@ +package tpgdclresource + +import ( + "context" + "fmt" + + glog "github.com/golang/glog" +) + +// ReqCtxKey is the key type for storing values in the context. +// Context requires custom type key. +type ReqCtxKey string + +// Keys used in context Value. +const ( + DoNotLogRequestsKey ReqCtxKey = "DoNotLogRequestsKey" + APIRequestIDKey ReqCtxKey = "APIRequestIDKey" +) + +// APIRequestID returns the RequestID for the API call. +// APIRequestID is supposed to be used in logs to help with debugging. +// Since we do not want explicit error handling everywhere we log, no error is thrown. +// It's okay to print an empty requestID in the worst-case scenario. +func APIRequestID(ctx context.Context) string { + val := ctx.Value(APIRequestIDKey) + if val == nil { + return "" + } + requestID, ok := val.(string) + if !ok { + glog.Warning("Could not convert APIRequestID val to string") + return "" + } + return requestID +} + +// ShouldLogRequest returns true if the request should be logged. 
+func ShouldLogRequest(ctx context.Context) (bool, error) { + val := ctx.Value(DoNotLogRequestsKey) + if val == nil { + return true, nil + } + doNotLog, ok := val.(bool) + if !ok { + return false, fmt.Errorf("could not convert DoNotLogRequests value to bool") + } + return !doNotLog, nil +} + +// ContextWithRequestID adds APIRequestID to ctx if APIRequestID is not present. +func ContextWithRequestID(ctx context.Context) context.Context { + if APIRequestID(ctx) != "" { + return ctx + } + return context.WithValue(ctx, APIRequestIDKey, CreateAPIRequestID()) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/dcl.go b/mmv1/third_party/terraform/tpgdclresource/dcl.go index ad5c5f2ce219..e99e66c1af2c 100644 --- a/mmv1/third_party/terraform/tpgdclresource/dcl.go +++ b/mmv1/third_party/terraform/tpgdclresource/dcl.go @@ -1,20 +1,16 @@ package tpgdclresource -import ( - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - var ( // CreateDirective restricts Apply to creating resources for Create - CreateDirective = []dcl.ApplyOption{ - dcl.WithLifecycleParam(dcl.BlockAcquire), - dcl.WithLifecycleParam(dcl.BlockDestruction), - dcl.WithLifecycleParam(dcl.BlockModification), + CreateDirective = []ApplyOption{ + WithLifecycleParam(BlockAcquire), + WithLifecycleParam(BlockDestruction), + WithLifecycleParam(BlockModification), } // UpdateDirective restricts Apply to modifying resources for Update - UpdateDirective = []dcl.ApplyOption{ - dcl.WithLifecycleParam(dcl.BlockCreation), - dcl.WithLifecycleParam(dcl.BlockDestruction), + UpdateDirective = []ApplyOption{ + WithLifecycleParam(BlockCreation), + WithLifecycleParam(BlockDestruction), } ) diff --git a/mmv1/third_party/terraform/tpgdclresource/declarative.go b/mmv1/third_party/terraform/tpgdclresource/declarative.go new file mode 100755 index 000000000000..8362540aeffa --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/declarative.go @@ -0,0 +1,370 @@ +package tpgdclresource + +import ( 
+ "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/kylelemons/godebug/pretty" +) + +// LifecycleParam is used to specify what operations are acceptable. +type LifecycleParam int + +const ( + // BlockDestruction blocks deleting the resource. This will + // cause some Apply operations to be impossible - some fields + // cannot be modified and require a destroy/recreate. + BlockDestruction LifecycleParam = iota + // BlockAcquire will error if the resource already exists. + BlockAcquire + // BlockCreation will error if the resource does not exist. + BlockCreation + // BlockModification will error if the resource is not in the desired state. + BlockModification + // IgnoreIfMissing does not create (and does not error) if the resource + // does not exist. + IgnoreIfMissing + // NoopOnDestroy does not destroy the resource, even if Delete() is + // called. + NoopOnDestroy +) + +// HasLifecycleParam returns whether the given slice has the requested param. +func HasLifecycleParam(lps []LifecycleParam, p LifecycleParam) bool { + for _, lp := range lps { + if lp == p { + return true + } + } + return false +} + +// SprintResourceCompact prints a struct into a compact single line string. +func SprintResourceCompact(v any) string { + prettyConfig := &pretty.Config{ + Compact: true, + IncludeUnexported: true, + } + return prettyConfig.Sprint(v) +} + +// SprintResource prints a struct into a multiline string to display to readers. +func SprintResource(v any) string { + prettyConfig := &pretty.Config{ + Diffable: true, // add line between braces and first/last val + IncludeUnexported: true, + } + return prettyConfig.Sprint(v) +} + +// EmptyValue returns an empty value to exclude PARAMETER-type values from +// being expanded +func EmptyValue() (map[string]any, error) { + return nil, nil +} + +/* + * + * Default Flatten functions + * + * Flatten functions are expected to return the value stored in the interface{}, + * returning a zero value otherwise. 
For primitive types and arrays that's a nil, + * but for types like time.Time it may be an empty object. + * + * Flattens need to consider the behaviour of https://developers.google.com/discovery/v1/type-format + * and of https://golang.org/pkg/encoding/json/#Unmarshal. As well, values may + * get inserted into the JSON map with their correct types already by decoders. + */ + +// FlattenInteger turns an interface pointing to an arbitrary type into *int64, +// taking into account that it may have been represented as various types per +// https://developers.google.com/discovery/v1/type-format. +// string, float64, int32, int64, and int values will return a *int64. +// nil returns nil, and unrecognised types will return a pointer to 0. +func FlattenInteger(v any) *int64 { + if v == nil { + return nil + } + // int64 values are represented as strings + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return &intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int64(floatVal) + return &intVal + } + + // ints won't appear per https://golang.org/pkg/encoding/json/#Unmarshal + // but may get inserted by decoders in unmarshalResource + if int32Val, ok := v.(int32); ok { + intVal := int64(int32Val) + return &intVal + } + + if intVal, ok := v.(int64); ok { + return &intVal + } + + if machineIntVal, ok := v.(int); ok { + intVal := int64(machineIntVal) + return &intVal + } + + return Int64(0) +} + +// FlattenDouble asserts that an interface is a float64 and returns a pointer to it, +// or to 0.0 if the value is invalid. +func FlattenDouble(v any) *float64 { + if v == nil { + return nil + } + p, ok := v.(float64) + if !ok { + return Float64(0.0) + } + + return &p +} + +// FlattenKeyValuePairs asserts that an interface is a map[string]string and +// returns it, or an empty map if the value is invalid. 
+func FlattenKeyValuePairs(v any) map[string]string { + if v == nil { + return nil + } + if ss, ok := v.(map[string]string); ok { + return ss + } + p, ok := v.(map[string]any) + if !ok { + return map[string]string{} + } + + return assertStringMap(p) +} + +// FlattenKeyValueInterface returns a pointer to an interface. +// It can only be used for untyped maps. +func FlattenKeyValueInterface(v any) map[string]any { + if v == nil { + return nil + } + + if ss, ok := v.(map[string]any); ok { + return ss + } + + return map[string]any{} +} + +// Returns a map[string]string from a map[string]interface{} +// Non-string values are skipped. +func assertStringMap(mi map[string]any) map[string]string { + ms := make(map[string]string) + for k, v := range mi { + if v == nil { + continue + } + + if s, ok := v.(string); ok { + ms[k] = s + } + } + return ms +} + +// FlattenFloatSlice asserts that an interface is a []float64 and returns +// it. +func FlattenFloatSlice(v any) []float64 { + if v == nil { + return nil + } + p, ok := v.([]any) + if !ok { + return []float64{} + } + + return assertFloatSlice(p) +} + +// Returns a []float64 from an []interface +// Non-float values are skipped. +func assertFloatSlice(id []any) []float64 { + dd := []float64{} + for _, v := range id { + if v == nil { + continue + } + + if d, ok := v.(float64); ok { + dd = append(dd, d) + } + } + + return dd +} + +// FlattenIntSlice asserts that an interface is a []int and returns +// it. +func FlattenIntSlice(v any) []int64 { + if v == nil { + return nil + } + p, ok := v.([]any) + if !ok { + return []int64{} + } + + return assertIntSlice(p) +} + +// Returns a []int64 from an []interface +// Non-int values are skipped. 
+func assertIntSlice(id []any) []int64 { + dd := []int64{} + for _, v := range id { + if v == nil { + continue + } + if f, ok := v.(float64); ok { + dd = append(dd, int64(f)) + continue + } + if d, ok := v.(int64); ok { + dd = append(dd, d) + } + } + + return dd +} + +// FlattenStringSlice asserts that an interface is a []string and returns +// it. +func FlattenStringSlice(v any) []string { + if v == nil { + return nil + } + p, ok := v.([]any) + if !ok { + return []string{} + } + + return assertStringSlice(p) +} + +// Returns a []string from an []interface +// Non-string values are skipped. +func assertStringSlice(is []any) []string { + ss := []string{} + for _, v := range is { + if v == nil { + continue + } + + if s, ok := v.(string); ok { + ss = append(ss, s) + } + } + + return ss +} + +// FlattenString asserts that an interface is a string and returns a pointer to +// it, or to the empty string if the value is invalid. +func FlattenString(v any) *string { + if v == nil { + return nil + } + p, ok := v.(string) + if !ok { + return String("") + } + + return &p +} + +// FlattenBool asserts that an interface is a bool and returns a pointer to it, or +// a pointer to false if the value is invalid. +func FlattenBool(v any) *bool { + if v == nil { + return nil + } + p, ok := v.(bool) + if !ok { + return Bool(false) + } + + return &p +} + +// FlattenTime asserts that an interface is a time.Time and returns it. +// Time values transmitted in JSON will be an RFC3339 time as per +// https://developers.google.com/discovery/v1/type-format +// Otherwise, it returns the empty time. +func FlattenTime(v any) time.Time { + if s, ok := v.(string); ok { + t, err := time.Parse(time.RFC3339, s) + if err == nil { + return t + } + } + + // In case we inject a time.Time in custom code, convert it. 
+ if p, ok := v.(time.Time); ok { + return p + } + + return time.Time{} +} + +// FlattenSecretValue behaves the same way as FlattenString, except that it +// returns nil if the value is not present. +func FlattenSecretValue(v any) *string { + p, ok := v.(string) + if !ok { + return nil + } + + return &p + +} + +// ExtractElementFromList takes in bytes corresponding to a json object of the structure +// { "listKey": [{"foo": "bar", ...}, {"foo": "baz", ...}] } +// and returns the first element for which isElement returns true. +// isElement operates on the serialized json representation of each element - +// to the extent that json.Marshal(json.Unmarshal(X)) != X, this may differ from the +// actual elements in the input bytes - but this should be exclusively +// differences which are not semantically significant in json. +func ExtractElementFromList(b []byte, listKey string, isElement func([]byte) bool) ([]byte, error) { + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + l, ok := m[listKey] + if !ok { + return nil, NotFoundError{Cause: fmt.Errorf("could not find %q in %v, assuming list is empty and returning not found", listKey, m)} + } + list, ok := l.([]any) + if !ok { + return nil, fmt.Errorf("could not convert %v to list", l) + } + for _, v := range list { + if subM, ok := v.(map[string]any); ok { + if subB, err := json.Marshal(subM); err != nil { + continue + } else if isElement(subB) { + return subB, nil + } + } + } + // Return a 404-style error. 
+ return nil, NotFoundError{Cause: fmt.Errorf("could not find a match in %v", list)} +} diff --git a/mmv1/third_party/terraform/tpgdclresource/diff.go b/mmv1/third_party/terraform/tpgdclresource/diff.go new file mode 100755 index 000000000000..fcfe90bede51 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/diff.go @@ -0,0 +1,572 @@ +package tpgdclresource + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// DiffInfo is a struct that contains all information about the diff that's about to occur. +type DiffInfo struct { + // Ignore + OutputOnly cause the diff checker to always return no-diff. + Ignore bool + OutputOnly bool + ServerDefault bool + MergeNestedDiffs bool + IgnoredPrefixes []string + Type string + + // ObjectFunction is the function used to diff a Nested Object. + ObjectFunction func(desired, actual any, fn FieldName) ([]*FieldDiff, error) + + // CustomDiff is used to handle diffing a field when normal diff functions will not suffice. + // It should return false if there is any diff between 'desired' and 'actual'. + CustomDiff func(desired, actual any) bool + + // OperationSelector takes in the field's diff and returns the name of the operation (or Recreate) that should be triggered. + OperationSelector func(d *FieldDiff) []string + + EmptyObject any +} + +// FieldName is used to add information about a field's name for logging purposes. +type FieldName struct { + FieldName string +} + +// AddIndex adds an index to a FieldName and returns the same item. +// Info is always pass-by-value, so the original field name still exists. +func (i FieldName) AddIndex(index int) FieldName { + newInfo := i + newInfo.FieldName = newInfo.FieldName + fmt.Sprintf("[%v]", index) + return newInfo +} + +// AddNest adds an index to a FieldName and returns the same item. +// Info is always pass-by-value, so the original field name still exists. 
+func (i FieldName) AddNest(field string) FieldName { + newInfo := i + if i.FieldName == "" { + newInfo.FieldName = field + } else { + newInfo.FieldName = newInfo.FieldName + fmt.Sprintf(".%s", field) + } + return newInfo +} + +// FieldDiff contains all information about a diff that exists in the resource. +type FieldDiff struct { + FieldName string + Message string + Desired any + Actual any + + ToAdd []any + ToRemove []any + + // The name of the operation that should result (may be Recreate) + // In the case of sets, more than one operation may be returned. + ResultingOperation []string +} + +func (d *FieldDiff) String() string { + if d.Message != "" { + return fmt.Sprintf("Field %s diff: %s", d.FieldName, d.Message) + } else if len(d.ToAdd) != 0 || len(d.ToRemove) != 0 { + return fmt.Sprintf("Field %s: add %v, remove %v", d.FieldName, d.ToAdd, d.ToRemove) + } + return fmt.Sprintf("Field %s: got %s, want %s", d.FieldName, SprintResourceCompact(d.Actual), SprintResourceCompact(d.Desired)) +} + +func stringValue(i any) string { + v := reflect.ValueOf(i) + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return "nil" + } + return fmt.Sprintf("%v", reflect.Indirect(v)) + } + return fmt.Sprintf("%v", i) +} + +// Diff takes in two interfaces and diffs them according to Info. +func Diff(desired, actual any, info DiffInfo, fn FieldName) ([]*FieldDiff, error) { + var diffs []*FieldDiff + // All Output-only fields should not be diffed. + if info.OutputOnly || info.Ignore { + return nil, nil + } + + // If desired is a zero value, we do not care about the field. 
+ if IsZeroValue(desired) { + return nil, nil + } + + if info.OperationSelector == nil { + return nil, fmt.Errorf("an operation selector function must exist") + } + + desiredType := ValueType(desired) + + if desiredType == "invalid" { + return nil, nil + } + + if info.CustomDiff != nil { + if !info.CustomDiff(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + } + addOperationToDiffs(diffs, info) + return diffs, nil + } + + if desiredType == "slice" { + dSlice, iSlice, err := slices(desired, actual) + if err != nil { + return nil, err + } + var arrDiffs []*FieldDiff + if info.Type == "Set" { + arrDiffs, err = setDiff(dSlice, iSlice, info, fn) + } else { + arrDiffs, err = arrayDiff(dSlice, iSlice, info, fn) + } + if err != nil { + return nil, err + } + diffs = append(diffs, arrDiffs...) + addOperationToDiffs(diffs, info) + return diffs, nil + } + + if info.Type == "EnumType" { + if !reflect.DeepEqual(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + addOperationToDiffs(diffs, info) + return diffs, nil + } + return nil, nil + } + + switch desiredType { + case "string": + dStr, err := str(desired) + if err != nil { + return nil, err + } + + // Protobufs cannot differentiate between empty primitive values + null. + // If the API returns null or does not return a value for the field and we have set the empty string, those are equivalent. 
+ if IsZeroValue(actual) && *dStr == "" { + return diffs, nil + } + + aStr, err := str(actual) + if err != nil { + return nil, err + } + + if info.Type == "ReferenceType" { + if !StringEqualsWithSelfLink(dStr, aStr) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: dStr, Actual: aStr}) + } + } else { + if !StringCanonicalize(dStr, aStr) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: dStr, Actual: aStr}) + } + } + + case "map": + dMap, aMap, err := maps(desired, actual) + if err != nil { + return nil, err + } + mapDiffs, err := mapCompare(dMap, aMap, info.IgnoredPrefixes, info, fn) + if err != nil { + return nil, err + } + if len(mapDiffs) > 0 { + diffs = append(diffs, mapDiffs...) + } + + case "int64": + dInt, err := makeint64(desired) + if err != nil { + return nil, err + } + + // 0 is the empty value for integers. + if IsZeroValue(actual) && *dInt == 0 { + return diffs, nil + } + + if !reflect.DeepEqual(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + } + + case "int": + dInt, err := makeint(desired) + if err != nil { + return nil, err + } + + // 0 is the empty value for integers. + if IsZeroValue(actual) && *dInt == 0 { + return diffs, nil + } + + if !reflect.DeepEqual(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + } + + case "float64": + dFloat, err := makefloat64(desired) + if err != nil { + return nil, err + } + + // 0 is the empty value for integers. 
+ if IsZeroValue(actual) && *dFloat == 0.0 { + return diffs, nil + } + + if !reflect.DeepEqual(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + } + + case "bool": + dBool, aBool, err := bools(desired, actual) + if err != nil { + return nil, err + } + if !BoolCanonicalize(dBool, aBool) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: dBool, Actual: aBool}) + } + + case "struct": + // If API returns nil (which means field is unset) && we have the empty-struct, no diff occurs. + if IsZeroValue(actual) && IsEmptyValueIndirect(desired) { + return nil, nil + } + + // Want empty value, but non-empty value currently exists. + // Only consider *explicitly* empty values, rather than "some combination + // of nils and falses" (as IEVI would do), because of the case comparing + // a non-explicitly empty struct with a struct containing only computed fields. + // See compute's `validate_test.go` for example. + if hasEmptyStructField(desired) && !IsEmptyValueIndirect(actual) { + if info.ServerDefault { + // The API can return values where none are in the desired state. + return nil, nil + } + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + addOperationToDiffs(diffs, info) + return diffs, nil + } + + if info.ObjectFunction == nil { + return nil, fmt.Errorf("struct %v given without an object function", desired) + } + + if info.EmptyObject == nil { + return nil, fmt.Errorf("struct %v given without an empty object type", desired) + } + + // If the API returns nil, we can't diff against a nil. We should use the empty object instead. + // This is because the user could write out a config that is functionally equivalent to the empty object (contains all 0s and ""), + // but is not technically the empty object. 
+ if actual == nil || ValueType(actual) == "invalid" { + actual = info.EmptyObject + } + + ds, err := info.ObjectFunction(desired, actual, fn) + if err != nil { + return nil, err + } + if info.MergeNestedDiffs { + // Replace any nested diffs with a recreate operation with a diff in this field. + nonRecreateCount := 0 + for _, d := range ds { + if len(d.ResultingOperation) == 0 { + return nil, fmt.Errorf("diff found in field %q with no operation", d.FieldName) + } + if d.ResultingOperation[0] != "Recreate" { + ds[nonRecreateCount] = d + nonRecreateCount++ + } + } + if nonRecreateCount < len(ds) { + // At least one nested diff requires a recreate. + ds[nonRecreateCount] = &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual} + nonRecreateCount++ + } + ds = ds[:nonRecreateCount] + } + diffs = append(diffs, ds...) + default: + return nil, fmt.Errorf("no diffing logic exists for type: %q", desiredType) + } + + addOperationToDiffs(diffs, info) + return diffs, nil +} + +func arrayDiff(desired, actual []any, info DiffInfo, fn FieldName) ([]*FieldDiff, error) { + var diffs []*FieldDiff + + // Nothing to diff against. + if actual == nil { + return diffs, nil + } + + if len(desired) != len(actual) && !IsZeroValue(desired) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Message: fmt.Sprintf("different lengths: desired %d, actual %d", len(desired), len(actual))}) + return diffs, nil + } + + for i, dItem := range desired { + aItem := actual[i] + diff, err := Diff(dItem, aItem, info, fn.AddIndex(i)) + if err != nil { + return nil, err + } + if diff != nil { + diffs = append(diffs, diff...) + } + } + return diffs, nil +} + +func setDiff(desired, actual []any, info DiffInfo, fn FieldName) ([]*FieldDiff, error) { + var diffs []*FieldDiff + + // Everything should be added. 
+ if actual == nil { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, ToAdd: desired}) + return diffs, nil + } + + var toAdd, toRemove []any + + for i, aItem := range actual { + found := false + for _, desItem := range desired { + if ds, _ := Diff(desItem, aItem, info, fn.AddIndex(i)); len(ds) == 0 { + found = true + break + } + } + if !found { + toRemove = append(toRemove, aItem) + } + } + + for i, dItem := range desired { + found := false + for _, actItem := range actual { + if ds, _ := Diff(dItem, actItem, info, fn.AddIndex(i)); len(ds) == 0 { + found = true + break + } + } + if !found { + toAdd = append(toAdd, dItem) + } + } + + if len(toAdd) > 0 || len(toRemove) > 0 { + return []*FieldDiff{&FieldDiff{FieldName: fn.FieldName, ToAdd: toAdd, ToRemove: toRemove}}, nil + } + return nil, nil +} + +// ValueType returns the reflect-style kind of an interface or the underlying type of a pointer. +func ValueType(i any) string { + if reflect.ValueOf(i).Kind() == reflect.Ptr { + return reflect.Indirect(reflect.ValueOf(i)).Kind().String() + } + return reflect.ValueOf(i).Kind().String() +} + +func strs(d, i any) (*string, *string, error) { + dStr, err := str(d) + if err != nil { + return nil, nil, err + } + + iStr, err := str(i) + if err != nil { + return nil, nil, err + } + + return dStr, iStr, nil +} + +func str(d any) (*string, error) { + dPtr, dOk := d.(*string) + if !dOk { + dStr, dOk2 := d.(string) + if !dOk2 { + return nil, fmt.Errorf("was given non string %v", d) + } + dPtr = String(dStr) + } + return dPtr, nil +} + +func makeint64(d any) (*int64, error) { + dPtr, dOk := d.(*int64) + if !dOk { + dInt, dOk2 := d.(int64) + if !dOk2 { + return nil, fmt.Errorf("was given non int64 %v", d) + } + dPtr = Int64(dInt) + } + return dPtr, nil +} + +func makeint(d any) (*int, error) { + dPtr, dOk := d.(*int) + if !dOk { + dInt, dOk2 := d.(int) + if !dOk2 { + return nil, fmt.Errorf("was given non int %v", d) + } + dPtr = &dInt + } + return dPtr, nil +} + +func 
makefloat64(d any) (*float64, error) { + dPtr, dOk := d.(*float64) + if !dOk { + dFloat, dOk2 := d.(float64) + if !dOk2 { + return nil, fmt.Errorf("was given non float64 %v", d) + } + dPtr = &dFloat + } + return dPtr, nil +} + +func bools(d, i any) (*bool, *bool, error) { + dBool, err := boolean(d) + if err != nil { + return nil, nil, err + } + + iBool, err := boolean(i) + if err != nil { + return nil, nil, err + } + + return dBool, iBool, nil +} + +func boolean(d any) (*bool, error) { + dPtr, dOk := d.(*bool) + if !dOk { + return nil, nil + } + return dPtr, nil +} + +func maps(d, a any) (map[string]any, map[string]any, error) { + dMap, _ := mapCast(d) + aMap, _ := mapCast(a) + return dMap, aMap, nil +} + +func mapCast(m any) (map[string]any, error) { + j, err := json.Marshal(m) + if err != nil { + return nil, err + } + + var mi map[string]any + json.Unmarshal(j, &mi) + return mi, nil +} + +func slices(d, i any) ([]any, []any, error) { + dSlice, err := slice(d) + if err != nil { + return nil, nil, err + } + + iSlice, err := slice(i) + if err != nil { + return nil, nil, err + } + + return dSlice, iSlice, nil +} + +func slice(slice any) ([]any, error) { + // Keep the distinction between nil and empty slice input + // This isn't going to be an error though. + if slice == nil { + return nil, nil + } + + s := reflect.ValueOf(slice) + + ret := make([]any, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret, nil +} + +func addOperationToDiffs(fds []*FieldDiff, i DiffInfo) { + for _, fd := range fds { + // Do not overwrite update operations on nested fields with parent field operations. 
+ if len(fd.ResultingOperation) == 0 { + fd.ResultingOperation = i.OperationSelector(fd) + } + } +} + +func mapCompare(d, a map[string]any, ignorePrefixes []string, info DiffInfo, fn FieldName) ([]*FieldDiff, error) { + var diffs []*FieldDiff + for k, v := range d { + if isIgnored(k, ignorePrefixes) { + continue + } + + av, ok := a[k] + if !ok { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Message: fmt.Sprintf("%v is missing from actual", k)}) + continue + } + + objDiffs, err := Diff(v, av, info, fn) + if err != nil { + return nil, err + } + diffs = append(diffs, objDiffs...) + } + + for k, v := range a { + if isIgnored(k, ignorePrefixes) { + continue + } + + dv, ok := d[k] + if !ok { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Message: fmt.Sprintf("%v is missing from desired", k)}) + continue + } + + objDiffs, err := Diff(dv, v, info, fn) + if err != nil { + return nil, err + } + diffs = append(diffs, objDiffs...) + } + + return diffs, nil +} diff --git a/mmv1/third_party/terraform/tpgdclresource/diff_utils.go b/mmv1/third_party/terraform/tpgdclresource/diff_utils.go new file mode 100755 index 000000000000..e650b6c90252 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/diff_utils.go @@ -0,0 +1,11 @@ +package tpgdclresource + +// RequiresRecreate is for Operations that require recreating. +func RequiresRecreate() func(d *FieldDiff) []string { + return func(d *FieldDiff) []string { return []string{"Recreate"} } +} + +// TriggersOperation is used to tell the diff checker to trigger an operation. 
+func TriggersOperation(op string) func(d *FieldDiff) []string { + return func(d *FieldDiff) []string { return []string{op} } +} diff --git a/mmv1/third_party/terraform/tpgdclresource/errors.go b/mmv1/third_party/terraform/tpgdclresource/errors.go new file mode 100755 index 000000000000..8eb110c0f169 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/errors.go @@ -0,0 +1,142 @@ +package tpgdclresource + +import ( + "fmt" + "time" + + "google.golang.org/api/googleapi" +) + +// NotFoundError is returned when a resource does not exist. +// Some APIs will also return it if a resource may exist but +// the current user does not have permission to view it. +// It wraps an error, usually a *googleapi.Error. +// It maps to HTTP 404. +type NotFoundError struct { + Cause error +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf("not found: %s", e.Cause) +} + +// HasCode returns true if the given error is an HTTP response with the given code. +func HasCode(err error, code int) bool { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == code { + return true + } + } + return false +} + +// IsNotFound returns true if the given error is a NotFoundError or is an HTTP 404. +func IsNotFound(err error) bool { + if _, ok := err.(NotFoundError); ok { + return true + } + return HasCode(err, 404) +} + +// IsNotFoundOrCode returns true if the given error is a NotFoundError, an HTTP 404, +// or an HTTP response with the given code. +func IsNotFoundOrCode(err error, code int) bool { + return IsNotFound(err) || HasCode(err, code) +} + +// EnumInvalidError is returned when an enum is set (by a client) to a string +// value that is not valid for that enum. +// It maps to HTTP 400, although it is usually generated client-side before +// the enum is sent to the server. 
+type EnumInvalidError struct { + Enum string + Value string + Valid []string +} + +func (e EnumInvalidError) Error() string { + return fmt.Sprintf("%s not a valid %s (%v)", e.Value, e.Enum, e.Valid) +} + +// NotDeletedError is returned when the resource should be deleted but has not +// been. It is returned if the operation to delete the resource has apparently +// been successful, but Get() still fetches the resource successfully. +type NotDeletedError struct { + ExistingResource any +} + +func (e NotDeletedError) Error() string { + return fmt.Sprintf("resource not successfully deleted: %#v.", e.ExistingResource) +} + +// IsRetryableGoogleError returns true if the error is retryable according to the given retryability. +func IsRetryableGoogleError(gerr *googleapi.Error, retryability Retryability, start time.Time) bool { + return retryability.Retryable && retryability.regex.MatchString(gerr.Message) && time.Since(start) < retryability.Timeout +} + +// IsRetryableHTTPError returns true if the error is retryable - in GCP that's a 500, 502, 503, or 429. +func IsRetryableHTTPError(err error, retryability map[int]Retryability, start time.Time) bool { + if gerr, ok := err.(*googleapi.Error); ok { + rtblt, ok := retryability[gerr.Code] + return ok && IsRetryableGoogleError(gerr, rtblt, start) + } + return false +} + +// IsNonRetryableHTTPError returns true if we know that the error is not retryable - in GCP that's a 400, 403, 404, or 409. +func IsNonRetryableHTTPError(err error, retryability map[int]Retryability, start time.Time) bool { + if gerr, ok := err.(*googleapi.Error); ok { + rtblt, ok := retryability[gerr.Code] + return ok && !IsRetryableGoogleError(gerr, rtblt, start) + } + return false +} + +// IsConflictError returns true if the error has conflict error code 409. 
+func IsConflictError(err error) bool { + if gerr, ok := err.(*googleapi.Error); ok { + return gerr.Code == 409 + } + return false +} + +// ApplyInfeasibleError is returned when lifecycle directives prevent an Apply from proceeding. +// This error means that no imperative requests were issued. +type ApplyInfeasibleError struct { + Message string +} + +func (e ApplyInfeasibleError) Error() string { + return e.Message +} + +// DiffAfterApplyError is returned when there are differences between the desired state and the +// intended state after Apply completes. This usually indicates an error in the SDK, probably +// related to a failure to canonicalize properly. +type DiffAfterApplyError struct { + Diffs []string +} + +func (e DiffAfterApplyError) Error() string { + return fmt.Sprintf("diffs exist after apply: %v", e.Diffs) +} + +// OperationNotDone is returned when an API operation hasn't completed. +// It may wrap an error if the error means that the operation can be retried. +type OperationNotDone struct { + Err error +} + +func (e OperationNotDone) Error() string { + return "operation not done." +} + +// AttemptToIndexNilArray is returned when GetMapEntry is called with a path that includes an array +// index and that array is unset in the map. +type AttemptToIndexNilArray struct { + FieldName string +} + +func (e AttemptToIndexNilArray) Error() string { + return fmt.Sprintf("field %s was nil, could not index array", e.FieldName) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/flatten.go b/mmv1/third_party/terraform/tpgdclresource/flatten.go new file mode 100755 index 000000000000..6d58d5b6863e --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/flatten.go @@ -0,0 +1,70 @@ +package tpgdclresource + +import ( + "strings" +) + +// SelfLinkToName returns the element of a string after the last slash. 
+func SelfLinkToName(v *string) *string { + if v == nil { + return nil + } + val := *v + comp := strings.Split(val, "/") + ret := comp[len(comp)-1] + return &ret +} + +// SelfLinkToNameExpander returns the element of a string after the last slash. +// Return value also has error since the dcl template requires the expander to return error. +func SelfLinkToNameExpander(v *string) (*string, error) { + return SelfLinkToName(v), nil +} + +// SelfLinkToNameArrayExpander returns the last element of each string in a slice after the last slash. +// Return value also has error since the dcl template requires the expander to return error. +func SelfLinkToNameArrayExpander(v []string) ([]string, error) { + r := make([]string, len(v)) + for i, w := range v { + r[i] = *SelfLinkToName(&w) + } + return r, nil +} + +// FalseToNil returns nil if the pointed-to boolean is 'false' - otherwise returns the pass-in pointer. +func FalseToNil(b *bool) (*bool, error) { + if b != nil && *b == false { + return nil, nil + } + return b, nil +} + +// SelfLinkToNameArray returns a slice of the elements of a slice of strings after the last slash. +func SelfLinkToNameArray(v []string) []string { + var a []string + for _, vv := range v { + ret := SelfLinkToName(&vv) + if ret != nil { + a = append(a, *ret) + } + } + return a +} + +// SelfLinkToNameWithPattern handles when the resource name can have `/` in it +// by matching the pattern. 
+func SelfLinkToNameWithPattern(v *string, pattern string) *string { + if v == nil { + return nil + } + regex, err := regexFromPattern(pattern) + if err != nil { + // Unable to compile regex, best guess return v + return v + } + matches := regex.FindStringSubmatch(*v) + if len(matches) == 0 { + return v + } + return &matches[len(matches)-1] +} diff --git a/mmv1/third_party/terraform/tpgdclresource/locations.go b/mmv1/third_party/terraform/tpgdclresource/locations.go new file mode 100755 index 000000000000..c842be1dffde --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/locations.go @@ -0,0 +1,23 @@ +package tpgdclresource + +import "regexp" + +// IsRegion returns true if this string refers to a GCP region. +func IsRegion(s *string) bool { + if s == nil { + return false + } + + r := regexp.MustCompile(`^[a-z]+-[a-z]+[0-9]+$`) + return r.MatchString(*s) +} + +// IsZone returns true if this string refers to a GCP zone. +func IsZone(s *string) bool { + if s == nil { + return false + } + + r := regexp.MustCompile(`^[a-z]+-[a-z]+[0-9]+-(ai[0-9]+)?[a-z]+$`) + return r.MatchString(*s) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/marshallers.go b/mmv1/third_party/terraform/tpgdclresource/marshallers.go new file mode 100755 index 000000000000..42540ff8d22a --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/marshallers.go @@ -0,0 +1,333 @@ +package tpgdclresource + +import ( + "encoding/json" + "fmt" + "reflect" + re "regexp" + "strconv" + "strings" + + glog "github.com/golang/glog" +) + +// MoveMapEntry moves the entry at `from` to `to`. `from` and `to` are slices +// of string keys. Each key except the last must refer to a map[string]interface{} +// in m - we will descend into m following those keys. If the maps at the levels +// above the target are empty after the move, they will be deleted. If there +// are no maps along the path to `to`, they will be created. If a map above +// the level of the target is missing, nothing will be done. 
If the map exists +// but `target` is not present, `nil` will be inserted at `to`. +func MoveMapEntry(m map[string]any, from, to []string) error { + fetch := m + // All elements before the last must point to a map[string]interface{} - + // this ranges over all those elements, so at the end of this loop, we have + // the map which contains the actual final element to move. + for _, idx := range from[:len(from)-1] { + f, ok := fetch[idx] + if !ok { + // Nothing to move, so it's not an error not to move it. + return nil + } + fetch, ok = f.(map[string]any) + if !ok { + return fmt.Errorf("could not fetch %q from %v", idx, fetch) + } + } + value, ok := fetch[from[len(from)-1]] + if !ok { + value = nil + } + delete(fetch, from[len(from)-1]) + if len(to) > 0 { + fetch = m + for _, idx := range to[:len(to)-1] { + f, ok := fetch[idx] + if !ok { + fetch[idx] = make(map[string]any) + f = fetch[idx] + } + fetch, ok = f.(map[string]any) + if !ok { + return fmt.Errorf("%v is not map[string]interface{}", f) + } + } + fetch[to[len(to)-1]] = value + } + return deleteIfEmpty(m, from) +} + +// GetMapEntry returns the value at `path` from `m`, following the same rules as +// `MoveMapEntry` except that a missing map or value is an error. +func GetMapEntry(m map[string]any, path []string) (any, error) { + if len(path) == 0 { + return m, nil + } + fetch := m + // All elements before the last must point to a map[string]interface{} - + // this ranges over all those elements, so at the end of this loop, we have + // the map which contains the element to fetch. 
+ for _, idx := range path[:len(path)-1] { + f, err := mapEntry(fetch, idx) + if err != nil { + return nil, err + } + var ok bool + fetch, ok = f.(map[string]any) + if !ok { + return nil, fmt.Errorf("could not fetch %q from %v", idx, fetch) + } + } + + value, err := mapEntry(fetch, path[len(path)-1]) + if err != nil { + return nil, err + } + return value, nil +} + +// mapEntry grabs item from fetch, and indexes into the array if the [num] notation is present. +func mapEntry(fetch map[string]any, item string) (any, error) { + // Check if we're fetching from an array. + arrayRegexp := re.MustCompile(`\[([0-9]*)\]`) + if arrayRegexp.MatchString(item) { + field := strings.Split(item, "[")[0] + items := arrayRegexp.FindAllStringSubmatch(item, 1) + index, err := strconv.Atoi(items[0][1]) + if err != nil { + return nil, err + } + + f, ok := fetch[field] + if !ok { + return nil, fmt.Errorf("could not find %q in %v", item, fetch) + } + + if f == nil { + return nil, &AttemptToIndexNilArray{FieldName: field} + } + + fetch, ok := f.([]any) + if !ok { + return nil, fmt.Errorf("field %s is a %T, not an array", field, f) + } + + if len(fetch) < index { + return nil, fmt.Errorf("field %s only has %v elements, needs %v", field, len(fetch), index) + } + + return fetch[index], nil + } + + f, ok := fetch[item] + if !ok { + return nil, fmt.Errorf("could not find %q in %v", item, fetch) + } + return f, nil +} + +func deleteIfEmpty(m map[string]any, from []string) error { + if len(from) > 1 { + sub, ok := m[from[0]] + if !ok { + return fmt.Errorf("could not fetch %q from %v", from[0], m) + } + smap, ok := sub.(map[string]any) + if !ok { + glog.Warningf("In deleting empty map while marshalling, %v not map[string]interface{}", sub) + return nil + } + deleteIfEmpty(smap, from[1:]) + } + if len(from) >= 1 { + if sub, ok := m[from[0]]; ok { + if subm, ok := sub.(map[string]any); ok && len(subm) == 0 { + delete(m, from[0]) + } + } + } + return nil +} + +// PutMapEntry inserts `item` at `path` 
into `m` - the inverse of GetMapEntry. +func PutMapEntry(m map[string]any, path []string, item any) error { + if len(path) == 0 { + return fmt.Errorf("cannot insert value at empty path") + } + put := m + // All elements before the last must point to a map[string]interface{} - + // this ranges over all those elements, so at the end of this loop, we have + // the map which contains the element to fetch. + for _, idx := range path[:len(path)-1] { + f, ok := put[idx] + if !ok { + f = make(map[string]any) + put[idx] = f + } + put, ok = f.(map[string]any) + if !ok { + return fmt.Errorf("could not cast %q from %v as map[string]interface{}", idx, put) + } + } + put[path[len(path)-1]] = item + return nil +} + +// MapFromListOfKeyValues turns a [{"key": k, "value": v}, ...] format-map into a normal string-string map. +// This is useful for a handful of GCP APIs which have chosen to represent maps this way. We +// expect relatively few of these in newer APIs - it is explicitly against https://aip.dev/apps/2717 - +// ("such a map is represented by a normal JSON object"). +// That AIP didn't exist at the time of development of, for instance, Compute v1. +func MapFromListOfKeyValues(rawFetch map[string]any, path []string, keyName, valueName string) (map[string]string, error) { + i, err := GetMapEntry(rawFetch, path) + if err != nil { + // If there's nothing there, it's okay to ignore. + glog.Warningf("In converting a map to [{\"key\": k, ...}, ...] 
format, no entry at %q in %v", path, rawFetch) + return nil, nil + } + il, ok := i.([]any) + if !ok { + return nil, fmt.Errorf("could not cast %v to []interface{}", i) + } + var items []map[string]any + for _, it := range il { + cast, ok := it.(map[string]any) + if !ok { + return nil, fmt.Errorf("could not cast %v to map[string]interface{}", it) + } + items = append(items, cast) + } + + m := make(map[string]string, len(items)) + for _, item := range items { + key, ok := item[keyName].(string) + if !ok { + return nil, fmt.Errorf("could not find 'key' in %v", item) + } + value, ok := item[valueName].(string) + if !ok { + return nil, fmt.Errorf("could not find 'value' in %v", item) + } + m[key] = value + } + return m, nil +} + +// ListOfKeyValuesFromMap is the opposite of MapFromListOfKeyValues, used in marshalling instead of unmarshalling. +func ListOfKeyValuesFromMap(m map[string]string, keyName, valueName string) ([]map[string]string, error) { + var items []map[string]string + for k, v := range m { + items = append(items, map[string]string{ + keyName: k, + valueName: v, + }) + } + return items, nil +} + +// ListOfKeyValuesFromMapInStruct returns the opposite of MapFromListOfKeyValues, except nested inside an struct under the subfield name. +func ListOfKeyValuesFromMapInStruct(m map[string]string, subfieldName, keyName, valueName string) (map[string][]map[string]string, error) { + maps, err := ListOfKeyValuesFromMap(m, keyName, valueName) + if err != nil { + return nil, err + } + return map[string][]map[string]string{ + subfieldName: maps, + }, nil +} + +// ConvertToMap converts the specified object into the map[string]interface{} which can +// be serialized into the same json object as the input object. 
+func ConvertToMap(obj any) (map[string]any, error) { + var m map[string]any + b, err := json.Marshal(obj) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +// ValueOrEmptyString takes a scalar or pointer to a scalar and returns either the empty string or its value. +func ValueOrEmptyString(i any) string { + if i == nil { + return "" + } + v := reflect.ValueOf(i) + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.IsValid() { + switch v.Kind() { + case reflect.Bool, reflect.Int, reflect.Int64, reflect.Float64, reflect.String: + return fmt.Sprintf("%v", v.Interface()) + } + } + return "" +} + +// ValueOrEmptyInt64 returns the value or the default value if the pointer is nil. +func ValueOrEmptyInt64(s *int64) int64 { + if s == nil { + return 0 + } + return *s +} + +// ValueOrEmptyBool returns the value or the default value if the pointer is nil. +func ValueOrEmptyBool(s *bool) bool { + if s == nil { + return false + } + return *s +} + +// ValueOrEmptyDouble returns the value or the default value if the pointer is nil. +func ValueOrEmptyDouble(s *float64) float64 { + if s == nil { + return 0.0 + } + return *s +} + +// FindStringInArray returns true if value found in array of strings +func FindStringInArray(s string, items []string) bool { + for _, v := range items { + if v == s { + return true + } + } + return false +} + +// ValueFromRegexOnField assigns val to the regex value on containerVal if val is unset +func ValueFromRegexOnField(fieldName string, val *string, containerVal *string, regex string) (*string, error) { + containerGroupedVal := String("") + // Fetch value from container if the container exists. 
+ if containerVal != nil && *containerVal != "" { + r := re.MustCompile(regex) + m := r.FindStringSubmatch(*containerVal) + if m != nil && len(m) >= 2 { + containerGroupedVal = String(m[1]) + } else if val == nil || *val == "" { + // The regex didn't match and the value doesn't exist. + return nil, fmt.Errorf("%s field parent has no matching values from regex %s in value %s", fieldName, regex, *containerVal) + } + } + + // If value exists + different from what's in container, error. + if val != nil && *val != "" { + if containerGroupedVal != nil && *containerGroupedVal != "" && *containerGroupedVal != *val { + return nil, fmt.Errorf("%s field has conflicting values of %s (from parent) and %s (from self)", fieldName, *containerGroupedVal, *val) + } + } + + // If value does not exist, use the value in container. + if val == nil || *val == "" { + return containerGroupedVal, nil + } + + return val, nil +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/operations.go b/mmv1/third_party/terraform/tpgdclresource/operations/operations.go new file mode 100755 index 000000000000..2f3483329523 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/operations.go @@ -0,0 +1,116 @@ +package operations + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// StandardGCPOperation can be parsed from the returned API operation and waited on. +// This is the typical GCP operation. +type StandardGCPOperation struct { + Name string `json:"name"` + Error *StandardGCPOperationError `json:"error"` + Done bool `json:"done"` + Response map[string]any `json:"response"` + // other irrelevant fields omitted + + config *dcl.Config + basePath string + verb string + + response map[string]any +} + +// StandardGCPOperationError is the GCP operation's Error body. 
+type StandardGCPOperationError struct { + Errors []*StandardGCPOperationErrorError `json:"errors"` + + StandardGCPOperationErrorError +} + +// String formats the StandardGCPOperationError as an error string. +func (e *StandardGCPOperationError) String() string { + if e == nil { + return "nil" + } + var b strings.Builder + for _, err := range e.Errors { + fmt.Fprintf(&b, "error code %q, message: %s, details: %+v\n", err.Code, err.Message, err.Details) + } + + if e.Code != "" { + fmt.Fprintf(&b, "error code %q, message: %s, details: %+v\n", e.Code, e.Message, e.Details) + } + + return b.String() +} + +// StandardGCPOperationErrorError is a singular error in a GCP operation. +type StandardGCPOperationErrorError struct { + Code json.Number `json:"code"` + Message string `json:"message"` + Details []map[string]any `json:"details"` +} + +// Wait waits for an StandardGCPOperation to complete by fetching the operation until it completes. +func (op *StandardGCPOperation) Wait(ctx context.Context, c *dcl.Config, basePath, verb string) error { + c.Logger.Infof("Waiting on operation: %v", op) + op.config = c + op.basePath = basePath + op.verb = verb + + if len(op.Response) != 0 { + op.response = op.Response + } + if op.Done { + c.Logger.Infof("Completed operation: %v", op) + return nil + } + + err := dcl.Do(ctx, op.operate, c.RetryProvider) + c.Logger.Infof("Completed operation: %v", op) + return err +} + +func (op *StandardGCPOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + u := dcl.URL(op.Name, op.basePath, op.config.BasePath, nil) + resp, err := dcl.SendRequest(ctx, op.config, op.verb, u, &bytes.Buffer{}, nil) + if err != nil { + // Since we don't know when this operation started, we will assume the + // context's timeout applies to all request errors. 
+ if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { + return nil, dcl.OperationNotDone{} + } + return nil, err + } + + if err := dcl.ParseResponse(resp.Response, op); err != nil { + return nil, err + } + + if !op.Done { + return nil, dcl.OperationNotDone{} + } + + if op.Error != nil { + return nil, fmt.Errorf("operation received error: %+v details: %v", op.Error, op.Response) + } + + if len(op.response) == 0 && len(op.Response) != 0 { + op.response = op.Response + } + + return resp, nil +} + +// FirstResponse returns the first response that this operation receives with the resource. +// This response may contain special information. +func (op *StandardGCPOperation) FirstResponse() (map[string]any, bool) { + return op.response, len(op.response) > 0 +} diff --git a/mmv1/third_party/terraform/tpgdclresource/orgpolicy_utils.go b/mmv1/third_party/terraform/tpgdclresource/orgpolicy_utils.go deleted file mode 100644 index 2cf61cf67ad1..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/orgpolicy_utils.go +++ /dev/null @@ -1,35 +0,0 @@ -package tpgdclresource - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" -) - -// OrgPolicyPolicy has a custom import method because the parent field needs to allow an additional forward slash -// to represent the type of parent (e.g. projects/{project_id}). 
-func ResourceOrgPolicyPolicyCustomImport(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "^(?P[^/]+/?[^/]*)/policies/(?P[^/]+)", - "^(?P[^/]+/?[^/]*)/(?P[^/]+)", - }, d, config); err != nil { - return err - } - - // Replace import id for the resource id - id, err := tpgresource.ReplaceVarsRecursive(d, config, "{{parent}}/policies/{{name}}", false, 0) - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - - // reset name to match the one from resourceOrgPolicyPolicyRead - if err := d.Set("name", id); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(id) - - return nil -} diff --git a/mmv1/third_party/terraform/tpgdclresource/project_id.go b/mmv1/third_party/terraform/tpgdclresource/project_id.go new file mode 100755 index 000000000000..eed01719ba8a --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/project_id.go @@ -0,0 +1,94 @@ +package tpgdclresource + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// This matches either the entire string if it contains no forward slashes or just projects/{project_number}/ if it does. +var projectNumberRegex = regexp.MustCompile(`(^\d+$|projects/\d+|metricsScopes/\d+)`) + +// This matches either the entire string if it contains no forward slashes or just projects/{project_id}/ if it does. +var projectIDRegex = regexp.MustCompile(`(^[^/]+$|projects/[^/]+|metricsScopes/[^/]+)`) + +// ProjectResponse is the response from Cloud Resource Manager. +type ProjectResponse struct { + ProjectID string `json:"projectId"` + ProjectNumber string `json:"projectNumber"` +} + +// FlattenProjectNumbersToIDs converts a project number to project ID. +func FlattenProjectNumbersToIDs(config *Config, fromServer *string) *string { + if fromServer == nil { + return nil + } + // Look for a number somewhere in here. 
+ editedServer := projectNumberRegex.ReplaceAllStringFunc(*fromServer, func(number string) string { + config.Logger.Infof("Preparing to use Cloud Resource Manager to convert %s to project id", number) + + p, err := fetchProjectInfo(config, number) + if err != nil { + config.Logger.Warning(err) + return number + } + + if strings.HasPrefix(number, "projects/") { + p.ProjectID = "projects/" + p.ProjectID + } + if strings.HasPrefix(number, "metricsScopes/") { + p.ProjectID = "metricsScopes/" + p.ProjectID + } + + return p.ProjectID + }) + return &editedServer +} + +var fetchProjectInfo = FetchProjectInfo + +// ExpandProjectIDsToNumbers converts a project ID to a project number. +func ExpandProjectIDsToNumbers(config *Config, fromConfig *string) (*string, error) { + if fromConfig == nil { + return nil, nil + } + + // Look for a project id somewhere in here. + editedConfig := projectIDRegex.ReplaceAllStringFunc(*fromConfig, func(id string) string { + config.Logger.Infof("Preparing to convert %s to project number", id) + + p, err := fetchProjectInfo(config, id) + if err != nil { + config.Logger.Warning(err) + return id + } + + if strings.HasPrefix(id, "projects/") { + p.ProjectNumber = "projects/" + p.ProjectNumber + } + if strings.HasPrefix(id, "metricsScopes/") { + p.ProjectNumber = "metricsScopes/" + p.ProjectNumber + } + + return p.ProjectNumber + }) + return &editedConfig, nil +} + +// FetchProjectInfo returns a ProjectResponse from CloudResourceManager. 
+func FetchProjectInfo(config *Config, projectIdentifier string) (ProjectResponse, error) { + var p ProjectResponse + trimmedIdentifier := strings.TrimPrefix(projectIdentifier, "projects/") + trimmedIdentifier = strings.TrimPrefix(trimmedIdentifier, "metricsScopes/") + trimmedIdentifier = strings.TrimSuffix(trimmedIdentifier, "/") + retryDetails, err := SendRequest(context.TODO(), config, "GET", "https://cloudresourcemanager.googleapis.com/v1/projects/"+trimmedIdentifier, nil, nil) + if err != nil { + return p, fmt.Errorf("failed to send request for project info using identifier %q: %s", projectIdentifier, err) + } + if err := ParseResponse(retryDetails.Response, &p); err != nil { + return p, fmt.Errorf("failed to parse response %v for project with identifier %q: %s", retryDetails.Response, projectIdentifier, err) + } + + return p, nil +} diff --git a/mmv1/third_party/terraform/tpgdclresource/resource.go b/mmv1/third_party/terraform/tpgdclresource/resource.go new file mode 100755 index 000000000000..064fc27b5e39 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/resource.go @@ -0,0 +1,22 @@ +package tpgdclresource + +type Resource interface { + Describe() ServiceTypeVersion +} + +// ServiceTypeVersion is a tuple that can uniquely identify a +// DCL resource type. +type ServiceTypeVersion struct { + // Service indicates the service to which this resource + // belongs, e.g., "compute". It is roughly analogous to the + // K8S "Group" identifier. + Service string + + // Type identifies the particular type of this resource, + // e.g., "ComputeInstance". It maps to the K8S "Kind". + Type string + + // Version is the DCL version of the resource, e.g., + // "beta" or "ga". 
+ Version string +} diff --git a/mmv1/third_party/terraform/tpgdclresource/retry.go b/mmv1/third_party/terraform/tpgdclresource/retry.go new file mode 100755 index 000000000000..a543804b5916 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/retry.go @@ -0,0 +1,139 @@ +package tpgdclresource + +import ( + "context" + "net/http" + "time" + + "github.com/cenkalti/backoff" + glog "github.com/golang/glog" +) + +// Stop is a value that indicates that no more retries should be attempted. +const Stop time.Duration = -1 + +// BackoffInitialInterval is the default InitialInterval value for Backoff. +const BackoffInitialInterval = 500 * time.Millisecond + +// BackoffMaxInterval is the default MaxInterval value for Backoff. +const BackoffMaxInterval = 30 * time.Second + +// RetryDetails provides information about an operation that a Retry implementation +// can use to make decisions about when or if to perform further requests. +type RetryDetails struct { + Request *http.Request + Response *http.Response +} + +// Operation is a retryable function. Implementations should return nil to indicate +// that the operation has concluded successfully, OperationNotDone to indicate +// that the operation should be retried, and any other error to indicate that a +// non-retryable error has occurred. +type Operation func(ctx context.Context) (*RetryDetails, error) + +// Retry provides an interface for handling retryable operations in a flexible manner. +type Retry interface { + // RetryAfter returns the amount of time that should elapse before an operation is re-run. Returning + // Stop indicates that no more retries should occur, and returning zero indicates that the operation + // should be immediately retried. + RetryAfter(details *RetryDetails) time.Duration +} + +// RetryProvider allows callers to provide custom retry behavior. +type RetryProvider interface { + // New returns an initialized Retry. 
+ New() Retry +} + +// NoRetry is a Retry implementation that will never retry. +type NoRetry struct{} + +// RetryAfter implementation that never retries. +func (n *NoRetry) RetryAfter(_ *RetryDetails) time.Duration { + return Stop +} + +// Reset is a no-op. +func (n *NoRetry) Reset() {} + +// Backoff is a Retry implementation that uses exponential backoff with jitter. +type Backoff struct { + // InitialInterval sets the time interval for the first retry delay. + InitialInterval time.Duration + // MaxInterval is the largest amount of time that should elapse between retries. + MaxInterval time.Duration + + bo *backoff.ExponentialBackOff +} + +// NewBackoff returns a Backoff with sensible defaults set. +func NewBackoff() *Backoff { + bo := backoff.NewExponentialBackOff() + bo.MaxInterval = BackoffMaxInterval + bo.InitialInterval = BackoffInitialInterval + bo.MaxElapsedTime = 0 + return &Backoff{ + bo: bo, + } +} + +// NewBackoffWithOptions returns a Backoff with caller-supplied parameters. +func NewBackoffWithOptions(initialInterval, maxInterval time.Duration) *Backoff { + bo := backoff.NewExponentialBackOff() + bo.MaxInterval = maxInterval + bo.InitialInterval = initialInterval + bo.MaxElapsedTime = 0 + return &Backoff{ + bo: bo, + } +} + +// RetryAfter implementation that uses exponential backoff. +func (n *Backoff) RetryAfter(_ *RetryDetails) time.Duration { + if next := n.bo.NextBackOff(); next != backoff.Stop { + return next + } + return Stop +} + +// BackoffRetryProvider is a default RetryProvider that returns a Backoff. +type BackoffRetryProvider struct{} + +// New returns an initialized Retry. +func (r *BackoffRetryProvider) New() Retry { + return NewBackoff() +} + +// Do performs op as a retryable operation, using retry to determine when and if to retry. +// Do will only continue if a OperationNotDone{} is returned. If op() returns another error +// or no error, Do will finish. 
+// OperationNotDone{} may have an error inside of it, indicating that it's a retryable error. +func Do(ctx context.Context, op Operation, retryProvider RetryProvider) error { + retry := retryProvider.New() + for { + details, err := op(ctx) + // Responsible for returning nil error too. + if _, ok := err.(OperationNotDone); !ok { + return err + } + + w := retry.RetryAfter(details) + if w == Stop { + if e, ok := err.(OperationNotDone); ok { + if e.Err != nil { + return e.Err + } + } + return OperationNotDone{} + } + + t := time.NewTimer(w) + select { + case <-ctx.Done(): + t.Stop() + glog.Info("retryable operation canceled by context") + return OperationNotDone{} + case <-t.C: + } + } +} diff --git a/mmv1/third_party/terraform/tpgdclresource/schema.go b/mmv1/third_party/terraform/tpgdclresource/schema.go new file mode 100755 index 000000000000..17fe5d167de0 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/schema.go @@ -0,0 +1,211 @@ +package tpgdclresource + +import ( + "fmt" + "strings" +) + +// FieldType is an enum of all the types a field can be. +type FieldType int + +const ( + // UnknownType refers to a Field that does not have a proper type defined. + UnknownType FieldType = iota + // MapType refers to a Field that is a Map (typically from string to string). + MapType + // EnumType refers to a Field that is an Enum. + EnumType + // ArrayType refers to a Field that is an Array of any kind. + ArrayType + // ObjectType refers to a Field that is a dictionary with set subfields. + ObjectType + // ReferenceType refers to a Field that is referencing another GCP resource. + ReferenceType + // DoubleType refers to a Field that is a Double. + DoubleType + // StringType refers to a Field that is a String. + StringType + // TimeType refers to a Field that is a Timestamp. + TimeType + // IntegerType refers to a Field that is an Integer. + IntegerType + // BooleanType refers to a Field that is a Boolean. 
+ BooleanType + // StatusType refers to a Field that is a Status. + StatusType + // ReusedType refers to a Field that does not require additional generation because it + // is the same type as another field already being generated. + ReusedType + // UntypedType refers to a type that has no type (in Go-speak, that's an interface{}). + // This can only be used for untyped maps (in proto-speak, google.protobuf.Struct) and cannot be used anywhere else. + // This will not work properly if used outside of a map. + UntypedType +) + +// Schema is the Entire OpenAPI schema. +type Schema struct { + Info *Info `yaml:"info"` + Paths *Paths `yaml:"paths"` + Components *Components `yaml:"components"` +} + +// ResolveDefinition returns the schema component being referenced. +func (s *Schema) ResolveDefinition(ref string) (*Component, error) { + if after, ok := strings.CutPrefix(ref, "#/components/schemas/"); ok { + if props, ok := s.Components.Schemas[after]; ok { + return props, nil + } + } + return nil, fmt.Errorf("could not resolve reference %q\v", ref) +} + +// Link is a URL plus text that should be displayed to an end user, usually in docs. +type Link struct { + Text string `yaml:"text"` + URL string `yaml:"url"` +} + +// Info is the Info block for the OpenAPI schema. +type Info struct { + Title string `yaml:"title"` + Description string `yaml:"description"` + StructName string `yaml:"x-dcl-struct-name,omitempty"` + HasIAM bool `yaml:"x-dcl-has-iam"` + Mutex string `yaml:"x-dcl-mutex,omitempty"` + Note string `yaml:"x-dcl-note,omitempty"` + Warning string `yaml:"x-dcl-warning,omitempty"` + Reference *Link `yaml:"x-dcl-ref,omitempty"` + Guides []*Link `yaml:"x-dcl-guides,omitempty"` +} + +// ResourceTitle returns the title of this resource. +func (i *Info) ResourceTitle() string { + return strings.Split(i.Title, "/")[1] +} + +// Paths is the Paths block for the OpenAPI schema. 
+type Paths struct { + Get *Path `yaml:"get"` + Apply *Path `yaml:"apply"` + Delete *Path `yaml:"delete,omitempty"` + DeleteAll *Path `yaml:"deleteAll,omitempty"` + List *Path `yaml:"list,omitempty"` +} + +// Path is the Path used for a method. +type Path struct { + Description string `yaml:"description"` + Parameters []PathParameters `yaml:"parameters"` +} + +// PathParameters is the Parameters for a given Path. +type PathParameters struct { + Name string `yaml:"name"` + Required bool `yaml:"required"` + Description string `yaml:"description,omitempty"` + Schema *PathParametersSchema `yaml:"schema,omitempty"` +} + +// PathParametersSchema is used to store the type. It is typically set to "string" +type PathParametersSchema struct { + Type string `yaml:"type"` +} + +// Components maps a Component name to the Component. +type Components struct { + Schemas map[string]*Component +} + +// Component contains all the information for a component (resource or reused type) +type Component struct { + Title string `yaml:"title,omitempty"` + ID string `yaml:"x-dcl-id,omitempty"` + Locations []string `yaml:"x-dcl-locations,omitempty"` + UsesStateHint bool `yaml:"x-dcl-uses-state-hint,omitempty"` + ParentContainer string `yaml:"x-dcl-parent-container,omitempty"` + LabelsField string `yaml:"x-dcl-labels,omitempty"` + HasCreate bool `yaml:"x-dcl-has-create"` + HasIAM bool `yaml:"x-dcl-has-iam"` + ReadTimeout int `yaml:"x-dcl-read-timeout"` + ApplyTimeout int `yaml:"x-dcl-apply-timeout"` + DeleteTimeout int `yaml:"x-dcl-delete-timeout"` + + // TODO: It appears that reused types are not fully conforming to the same spec as the rest of the components. + // Reused Types seem to follow the property spec, but not the component spec. + // This means that we need to have component "inline" all of the schema property fields to avoid having to override YAML parsing logic. + SchemaProperty Property `yaml:",inline"` +} + +// Property contains all information for a field (i.e. 
property) +type Property struct { + Type string `yaml:"type,omitempty"` + Format string `yaml:"format,omitempty"` + AdditionalProperties *Property `yaml:"additionalProperties,omitempty"` + Ref string `yaml:"$ref,omitempty"` + GoName string `yaml:"x-dcl-go-name,omitempty"` + GoType string `yaml:"x-dcl-go-type,omitempty"` + ReadOnly bool `yaml:"readOnly,omitempty"` + Description string `yaml:"description,omitempty"` + Immutable bool `yaml:"x-kubernetes-immutable,omitempty"` + Conflicts []string `yaml:"x-dcl-conflicts,omitempty"` + Default any `yaml:"default,omitempty"` + ServerDefault bool `yaml:"x-dcl-server-default,omitempty"` + ServerGeneratedParameter bool `yaml:"x-dcl-server-generated-parameter,omitempty"` + Sensitive bool `yaml:"x-dcl-sensitive,omitempty"` + ForwardSlashAllowed bool `yaml:"x-dcl-forward-slash-allowed,omitempty"` + SendEmpty bool `yaml:"x-dcl-send-empty,omitempty"` + ResourceReferences []*PropertyResourceReference `yaml:"x-dcl-references,omitempty"` + Enum []string `yaml:"enum,omitempty"` + ListType string `yaml:"x-dcl-list-type,omitempty"` + Items *Property `yaml:"items,omitempty"` + Unreadable bool `yaml:"x-dcl-mutable-unreadable,omitempty"` + ExtractIfEmpty bool `yaml:"x-dcl-extract-if-empty,omitempty"` + Required []string `yaml:"required,omitempty"` + Properties map[string]*Property `yaml:"properties,omitempty"` + Deprecated bool `yaml:"x-dcl-deprecated,omitempty"` + OptionalType bool `yaml:"x-dcl-optional-type,omitempty"` + Parameter bool `yaml:"x-dcl-parameter,omitempty"` + HasLongForm bool `yaml:"x-dcl-has-long-form,omitempty"` +} + +// IsOptional returns if the type is an optional type. +func (p *Property) IsOptional() bool { + return p.OptionalType +} + +// TypeEnum returns an enum referring to the type. 
+func (p *Property) TypeEnum() FieldType { + switch p.Type { + case "string": + if p.GoType != "" && p.GoType != "string" { + return EnumType + } else if len(p.ResourceReferences) > 0 { + return ReferenceType + } + return StringType + case "OptionalString": + return StringType + case "number", "OptionalFloat": + return DoubleType + case "integer", "OptionalInt": + return IntegerType + case "boolean", "OptionalBool": + return BooleanType + case "object": + if p.AdditionalProperties != nil && p.AdditionalProperties.GoType != "" && len(p.AdditionalProperties.Properties) != 0 { + return MapType + } + return ObjectType + case "array": + return ArrayType + } + return UnknownType +} + +// PropertyResourceReference contains all resource reference information. +type PropertyResourceReference struct { + Resource string `yaml:"resource"` + Field string `yaml:"field"` + Format string `yaml:"format,omitempty"` + Parent bool `yaml:"parent,omitempty"` +} diff --git a/mmv1/third_party/terraform/tpgdclresource/strings.go b/mmv1/third_party/terraform/tpgdclresource/strings.go new file mode 100755 index 000000000000..1e1fd51d3830 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/strings.go @@ -0,0 +1,149 @@ +package tpgdclresource + +import ( + "regexp" + "strings" +) + +// Map from initialism -> TitleCase variant +// We can assume camelCase is the same as TitleCase except that we downcase the +// first segment +var initialisms = map[string]string{ + "ai": "AI", + "ip": "IP", + "os": "OS", + "ipv4": "IPv4", + "ipv6": "IPv6", + "oauth": "OAuth", + "oauth2": "OAuth2", + "tpu": "TPU", + "vpc": "VPC", + "v1beta1": "V1Beta1", +} + +// SnakeToTitleCase converts a snake_case string to TitleCase / Go struct case. +func SnakeToTitleCase(s string) string { + return strings.Join(SnakeToTitleParts(s), "") +} + +// SnakeToJSONCase converts a snake_case string to jsonCase / camelCase, for use +// in JSON serialization. 
+func SnakeToJSONCase(s string) string { + parts := SnakeToTitleParts(s) + if len(parts) > 0 { + parts[0] = strings.ToLower(parts[0]) + } + + return strings.Join(parts, "") +} + +// SnakeToTitleParts returns the parts of a snake_case string titlecased as an +// array, taking into account common initialisms. +func SnakeToTitleParts(s string) []string { + parts := []string{} + segments := strings.Split(s, "_") + for _, seg := range segments { + if v, ok := initialisms[seg]; ok { + parts = append(parts, v) + } else { + if len(seg) < 1 { + continue + } + parts = append(parts, strings.ToUpper(seg[0:1])+seg[1:]) + } + } + + return parts +} + +// SnakeToTitleCasePath converts a resource path from snake to title case. For +// example: foo_bar.baz.qux -> FooBar.Baz.Qux +func SnakeToTitleCasePath(s, sep string) string { + str := []string{} + for _, p := range strings.Split(s, sep) { + str = append(str, SnakeToTitleCase(p)) + } + return strings.Join(str, sep) +} + +// TitleToCamelCasePath converts a resource path from title case to lower title case. +// For example: FooBar.Baz.Qux -> fooBar.baz.qux +func TitleToCamelCasePath(s string) string { + // Lowercase the first character and every character following a . + parts := strings.Split(s, ".") + for i, part := range parts { + parts[i] = strings.ToLower(part[:1]) + part[1:] + } + return strings.Join(parts, ".") +} + +// ProtoCamelCase converts a snake case name to a upper camel case name using the +// go protoc special rules: convert to camel case, except when +// the character following the underscore is a digit; e.g., +// foo_bar_2 -> FooBar_2. +// From: http://google3/net/goa/codegen/names.go;l=14;rcl=294425921 +func ProtoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. 
+ var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// TitleToSnakeCase takes in a TitleCase string and returns a snake_case string. +func TitleToSnakeCase(s string) string { + for k, v := range initialisms { + kCap := strings.ToUpper(k[0:1]) + k[1:] + s = strings.Replace(s, v, kCap, -1) + } + str := regexp.MustCompile("(.)([A-Z][a-z]+)").ReplaceAllString(s, "${1}_${2}") + return strings.ToLower(regexp.MustCompile("([a-z0-9])([A-Z])").ReplaceAllString(str, "${1}_${2}")) +} + +// StringSliceContains returns true if the slice ss contains string s. 
+func StringSliceContains(s string, ss []string) bool { + for _, st := range ss { + if st == s { + return true + } + } + return false +} diff --git a/mmv1/third_party/terraform/tpgdclresource/timestamp.go b/mmv1/third_party/terraform/tpgdclresource/timestamp.go new file mode 100755 index 000000000000..5c268b4b7cf5 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/timestamp.go @@ -0,0 +1,17 @@ +package tpgdclresource + +import ( + "time" +) + +// ProtoToTime converts a string from a DCL proto time string to a time.Time. +func ProtoToTime(s string) time.Time { + // Invalid time values will be picked up downstream. + t, _ := time.Parse(time.RFC3339, s) + return t +} + +// TimeToProto converts a time.Time to a proto time string. +func TimeToProto(t time.Time) string { + return t.Format(time.RFC3339) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go b/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go index 2e60238e3daa..13f79ecfabdb 100644 --- a/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go +++ b/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go @@ -5,7 +5,6 @@ import ( "fmt" "log" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -15,7 +14,7 @@ func OldValue(old, new interface{}) interface{} { } func HandleNotFoundDCLError(err error, d *schema.ResourceData, resourceName string) error { - if dcl.IsNotFound(err) { + if IsNotFound(err) { log.Printf("[WARN] Removing %s because it's gone", resourceName) // The resource doesn't exist anymore d.SetId("") @@ -46,3 +45,35 @@ func ResourceContainerAwsNodePoolCustomizeDiffFunc(_ context.Context, diff *sche return nil } + +type DCLLogger struct{} + +// Fatal records Fatal errors. +func (l DCLLogger) Fatal(args ...interface{}) { + log.Fatal(args...) +} + +// Fatalf records Fatal errors with added arguments. 
+func (l DCLLogger) Fatalf(format string, args ...interface{}) { + log.Fatalf(fmt.Sprintf("[DEBUG][DCL FATAL] %s", format), args...) +} + +// Info records Info errors. +func (l DCLLogger) Info(args ...interface{}) { + log.Print(args...) +} + +// Infof records Info errors with added arguments. +func (l DCLLogger) Infof(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL INFO] %s", format), args...) +} + +// Warningf records Warning errors with added arguments. +func (l DCLLogger) Warningf(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL WARNING] %s", format), args...) +} + +// Warning records Warning errors. +func (l DCLLogger) Warning(args ...interface{}) { + log.Print(args...) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/transport.go b/mmv1/third_party/terraform/tpgdclresource/transport.go new file mode 100755 index 000000000000..ec10f5545e98 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/transport.go @@ -0,0 +1,274 @@ +package tpgdclresource + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + apihttp "google.golang.org/api/transport/http" +) + +// SendRequest applies the credentials in the provided Config to a request with the specified +// verb, url, and body. It returns the Response from the server if the request returns a +// 2XX success code, or the *googleapi.Error if it returns any other code. The retry is +// optional; if supplied HTTP errors that are deemed temporary will be retried according +// to the policy implemented by the retry. 
+func SendRequest(ctx context.Context, c *Config, verb, url string, body *bytes.Buffer, retryProvider RetryProvider) (*RetryDetails, error) { + hdrs := http.Header{} + for h, v := range c.header { + for _, s := range v { + hdrs.Add(h, s) + } + } + hdrs.Set("User-Agent", c.UserAgent()) + hdrs.Set("Content-Type", c.contentType) + + u, err := AddQueryParams(url, c.queryParams) + if err != nil { + return nil, err + } + + hasUserProjectOverride, billingProject := UserProjectOverride(c, u) + if hasUserProjectOverride { + hdrs.Set("X-Goog-User-Project", billingProject) + } + + mtls, err := GetMTLSEndpoint(u) + if err != nil { + return nil, err + } + + options := []option.ClientOption{ + option.WithScopes(Scopes...), + internaloption.WithDefaultEndpoint(u), + internaloption.WithDefaultMTLSEndpoint(mtls), + } + for _, o := range c.clientOptions { + options = append(options, o) + } + + httpClient, endpoint, err := apihttp.NewClient(ctx, options...) + if err != nil { + return nil, err + } + if endpoint != "" { + u = endpoint + } + + if _, ok := httpClient.Transport.(loggingTransport); !ok { + // In cases where the config has been created using WithHTTPClient() we want to + // replace the default transport with our logging transport only once. + httpClient = &http.Client{ + Transport: loggingTransport{ + underlyingTransport: httpClient.Transport, + logger: c.Logger, + }, + CheckRedirect: httpClient.CheckRedirect, + Jar: httpClient.Jar, + Timeout: httpClient.Timeout, + } + } + + if body == nil { + // A nil value indicates an empty request body. 
+ body = &bytes.Buffer{} + } + bodyBytes := body.Bytes() + req, err := http.NewRequestWithContext(ctx, verb, u, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, err + } + req.Header = hdrs + + var res *http.Response + if retryProvider == nil { + res, err = httpClient.Do(req) + if err != nil { + return nil, err + } + err = googleapi.CheckResponse(res) + if err != nil { + // If this is an error, we will not be returning the + // body, so we should close it. + googleapi.CloseBody(res) + return nil, err + } + return &RetryDetails{Request: req, Response: res}, nil + } + + // The start time of request retries is used to determine if an HTTP error is still retryable. + start := time.Now() + err = Do(ctx, func(ctx context.Context) (*RetryDetails, error) { + // Reset req body before http call. + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + res, err = httpClient.Do(req) + if err != nil { + return nil, err + } + if err := googleapi.CheckResponse(res); err != nil { + // If this is an error, we will not be returning the + // body, so we should close it. + googleapi.CloseBody(res) + if IsRetryableRequestError(c, err, false, start) { + return nil, OperationNotDone{Err: err} + } + return nil, err + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + return &RetryDetails{Request: req.Clone(ctx), Response: res}, err + }, retryProvider) + if err != nil { + return nil, err + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + return &RetryDetails{Request: req, Response: res}, nil +} + +// AddQueryParams adds the specified query parameters to the specified url. 
+func AddQueryParams(rawurl string, params map[string]string) (string, error) { + u, err := url.Parse(rawurl) + if err != nil { + return "", err + } + q := u.Query() + for k, v := range params { + q.Set(k, v) + } + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ParseResponse reads a JSON response into a Go struct +func ParseResponse(resp *http.Response, ptr any) error { + defer resp.Body.Close() + return json.NewDecoder(resp.Body).Decode(ptr) +} + +// IsRetryableRequestError returns true if an error is determined to be +// a common retryable error based on heuristics about GCP API behaviours. +// The start time is used to determine if errors with custom timeouts should be retried. +func IsRetryableRequestError(c *Config, err error, retryNotFound bool, start time.Time) bool { + // Return transient errors that should be retried. + if IsRetryableHTTPError(err, c.codeRetryability, start) || (retryNotFound && IsNotFound(err)) { + c.Logger.Infof("Error appears retryable: %s", err) + return true + } + + if IsNonRetryableHTTPError(err, c.codeRetryability, start) { + c.Logger.Infof("Error appears not to be retryable: %s", err) + return false + } + + // Assume other errors are retryable. + c.Logger.Warningf("Unexpected HTTP error, assuming retryable: %s", err) + return true +} + +// Nprintf takes in a format string (with format {{key}} instead of %s) and a params map. +// Returns filled string. +func Nprintf(format string, params map[string]any) string { + pq := strings.Split(format, "?") + path := pq[0] + query := "" + if len(pq) == 2 { + query = pq[1] + } else if len(pq) > 2 { + return "error: too many path separators." 
+ } + for key, val := range params { + r := regexp.MustCompile(`{{\s?` + regexp.QuoteMeta(key) + `\s?}}`) + path = r.ReplaceAllString(path, fmt.Sprintf("%v", val)) + } + for key, val := range params { + r := regexp.MustCompile(`{{\s?` + regexp.QuoteMeta(key) + `\s?}}`) + query = r.ReplaceAllString(query, url.QueryEscape(fmt.Sprintf("%v", val))) + } + if query != "" { + return path + "?" + query + } + return path +} + +// URL takes in a partial URL, default base path, optional user-specified base-path and a params map. +func URL(urlpath, basePath, userPath string, params map[string]any) string { + if userPath != "" { + if strings.HasSuffix(userPath, "/") { + userPath = userPath[:len(userPath)-1] + } + return Nprintf(strings.Join([]string{userPath, urlpath}, "/"), params) + } + if before, ok := strings.CutSuffix(basePath, "/"); ok { + basePath = before + } + return Nprintf(strings.Join([]string{basePath, urlpath}, "/"), params) +} + +// ResponseBodyAsJSON reads the response body from a *RetryDetails and returns +// it as unstructured JSON in a map[string]interface{}. +func ResponseBodyAsJSON(retry *RetryDetails) (map[string]any, error) { + defer retry.Response.Body.Close() + b, err := io.ReadAll(retry.Response.Body) + if err != nil { + return nil, err + } + + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + + return m, nil +} + +// GetMTLSEndpoint returns the API endpoint used for mTLS authentication. 
+func GetMTLSEndpoint(baseEndpoint string) (string, error) { + u, err := url.Parse(baseEndpoint) + if err != nil { + return "", err + } + portParts := strings.Split(u.Host, ":") + if len(portParts) == 0 || portParts[0] == "" { + return "", fmt.Errorf("api endpoint %q is missing host", u.String()) + } + domainParts := strings.Split(portParts[0], ".") + if len(domainParts) > 1 { + u.Host = fmt.Sprintf("%s.mtls.%s", domainParts[0], strings.Join(domainParts[1:], ".")) + } else { + u.Host = fmt.Sprintf("%s.mtls", domainParts[0]) + } + if len(portParts) > 1 { + u.Host = fmt.Sprintf("%s:%s", u.Host, portParts[1]) + } + return u.String(), nil +} + +// UserProjectOverride returns true if user project override should be used and the project that should be set. +func UserProjectOverride(c *Config, url string) (bool, string) { + if !c.userOverrideProject { + return false, "" + } + + if c.billingProject != "" { + return true, c.billingProject + } + + r := regexp.MustCompile(`projects/([a-z0-9A-Z-:_]*)/`) + g := r.FindStringSubmatch(url) + if g != nil && len(g) > 1 { + return true, g[1] + } + + // This URL does not contain a project and no project was found in the URL. + // This most likely means a non-project resource was used accidentally. + return false, "" +} diff --git a/mmv1/third_party/terraform/tpgdclresource/type.go b/mmv1/third_party/terraform/tpgdclresource/type.go new file mode 100755 index 000000000000..c7de8454f3fa --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/type.go @@ -0,0 +1,51 @@ +package tpgdclresource + +// Bool converts a bool to a *bool +func Bool(b bool) *bool { + return &b +} + +// Float64 converts a float64 to *float64 +func Float64(f float64) *float64 { + return &f +} + +// Float64OrNil converts a float64 to *float64, returning nil if it's empty (0.0). 
+func Float64OrNil(f float64) *float64 { + if f == 0.0 { + return nil + } + return &f +} + +// Int64 converts an int64 to *int64 +func Int64(i int64) *int64 { + return &i +} + +// Int64OrNil converts an int64 to *int64, returning nil if it's empty (0). +func Int64OrNil(i int64) *int64 { + if i == 0 { + return nil + } + return &i +} + +// String converts a string to a *string +func String(s string) *string { + return &s +} + +// StringWithError converts a string to a *string, returning a nil error to +// satisfy type signatures that expect one. +func StringWithError(s string) (*string, error) { + return &s, nil +} + +// StringOrNil converts a string to a *string, returning nil if it's empty (""). +func StringOrNil(s string) *string { + if s == "" { + return nil + } + return &s +} diff --git a/mmv1/third_party/terraform/tpgdclresource/update.go b/mmv1/third_party/terraform/tpgdclresource/update.go new file mode 100755 index 000000000000..06e4d0f0673c --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/update.go @@ -0,0 +1,106 @@ +package tpgdclresource + +import ( + "fmt" + "regexp" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "bitbucket.org/creachadair/stringset" +) + +// UpdateMask creates a Update Mask string according to https://google.aip.dev/161 +func UpdateMask(ds []*FieldDiff) string { + var ss []string + for _, v := range ds { + ss = append(ss, convertUpdateMaskVal(v.FieldName)) + } + + dupesRemoved := stringset.New(ss...).Elements() + + // Sorting the entries is optional, but makes it easier to read + test. 
+ sort.Strings(dupesRemoved) + return strings.Join(dupesRemoved, ",") +} + +func titleCaseToCamelCase(s string) string { + r, n := utf8.DecodeRuneInString(s) + p := string(unicode.ToLower(r)) + p = p + s[n:] + return p +} + +// Diffs come in the form Http.AuthInfo.Password +// Needs to be in the form http.authInfo.password +func convertUpdateMaskVal(s string) string { + r := regexp.MustCompile(`\[\d\]`) + + // camelCase string (right now, it's in TitleCase). + parts := strings.Split(s, ".") + var p []string + for _, q := range parts { + if r.MatchString(q) { + // Indexing into a repeated field. + bareFieldName := r.ReplaceAllString(q, "") + p = append(p, titleCaseToCamelCase(bareFieldName)) + + // Repeated fields cannot be intermediary in a field mask, so we + // must terminate the field mask here. + break + } else { + p = append(p, titleCaseToCamelCase(q)) + } + } + + // * notation should only be used if this is not the last field. + // Example: res.array.* should be res.array, but res.array.*.bar means "update only bar in all my array fields" + if p[len(p)-1] == "*" { + p = p[0 : len(p)-1] + } + + return strings.Join(p, ".") +} + +// TopLevelUpdateMask returns only the top-level fields. +func TopLevelUpdateMask(ds []*FieldDiff) string { + var ss []string + for _, v := range ds { + part := strings.Split(v.FieldName, ".")[0] + ss = append(ss, convertUpdateMaskVal(part)) + } + + dupesRemoved := stringset.New(ss...).Elements() + + // Sorting the entries is optional, but makes it easier to read + test. + sort.Strings(dupesRemoved) + return strings.Join(dupesRemoved, ",") +} + +// SnakeCaseUpdateMask returns the update mask, but all fields are snake case. +func SnakeCaseUpdateMask(ds []*FieldDiff) string { + var ss []string + for _, v := range ds { + ss = append(ss, TitleToSnakeCase(convertUpdateMaskVal(v.FieldName))) + } + dupesRemoved := stringset.New(ss...).Elements() + + // Sorting the entries is optional, but makes it easier to read + test. 
+ sort.Strings(dupesRemoved) + return strings.Join(dupesRemoved, ",") +} + +// UpdateMaskWithPrefix returns a Standard Update Mask with a prefix attached. +func UpdateMaskWithPrefix(ds []*FieldDiff, prefix string) string { + um := UpdateMask(ds) + parts := strings.Split(um, ",") + + var ss []string + + for _, part := range parts { + ss = append(ss, fmt.Sprintf("%s.%s", prefix, part)) + } + + return strings.Join(ss, ",") +} diff --git a/mmv1/third_party/terraform/tpgdclresource/utils.go b/mmv1/third_party/terraform/tpgdclresource/utils.go new file mode 100755 index 000000000000..f20709050e4d --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/utils.go @@ -0,0 +1,16 @@ +package tpgdclresource + +import ( + "time" + + "github.com/google/go-cpy/cpy" +) + +// Copy makes a deep copy of an interface. +func Copy(src any) any { + copier := cpy.New( + cpy.Shallow(time.Time{}), + cpy.IgnoreAllUnexported(), + ) + return copier.Copy(src) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/validate.go b/mmv1/third_party/terraform/tpgdclresource/validate.go new file mode 100755 index 000000000000..6ff0fbbdffc4 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/validate.go @@ -0,0 +1,69 @@ +package tpgdclresource + +import ( + "fmt" + "strings" +) + +// Required takes in a DCL resource (represented as an interface) and a dot-notated path (with JSON names). If the path is not set, an error will be returned. +func Required(r any, path string) error { + var m map[string]any + m, err := ConvertToMap(r) + if err != nil { + return err + } + val, err := GetMapEntry(m, strings.Split(path, ".")) + if err != nil { + return err + } else if val == nil { + return fmt.Errorf("required value %q could not be found", path) + } + return nil +} + +// RequiredParameter takes in a value (typically one that's not exported in JSON) and returns an error if it is not set. 
+func RequiredParameter(v any, name string) error { + if IsZeroValue(v) { + return fmt.Errorf("%q must be set", name) + } + return nil +} + +func countOfNonempty(v ...any) int { + i := 0 + for _, val := range v { + if !IsEmptyValueIndirect(val) { + i++ + } + } + return i +} + +// ValidateAtMostOneOfFieldsSet returns an error if more than one of the provided values is nonempty. +func ValidateAtMostOneOfFieldsSet(fieldNames []string, v ...any) error { + if countOfNonempty(v...) > 1 { + return fmt.Errorf("more than one value set: %v", fieldNames) + } + return nil +} + +// ValidateAtLeastOneOfFieldsSet returns an error if none of the provided values is nonempty. +func ValidateAtLeastOneOfFieldsSet(fieldNames []string, v ...any) error { + if countOfNonempty(v...) == 0 { + return fmt.Errorf("zero values set: %v", fieldNames) + } + return nil +} + +// ValidateExactlyOneOfFieldsSet returns an error if 0 or 2+ of the provided values is nonempty. +func ValidateExactlyOneOfFieldsSet(fieldNames []string, v ...any) error { + if countOfNonempty(v...) != 1 { + return fmt.Errorf("not exactly one value set: %v", fieldNames) + } + return nil +} + +// AnySet returns true if any of the values provided is nonempty. +func AnySet(v ...any) bool { + return countOfNonempty(v...) 
> 0 +} diff --git a/mmv1/third_party/terraform/tpgresource/tpgtools_custom_flattens.go.tmpl b/mmv1/third_party/terraform/tpgresource/tpgtools_custom_flattens.go.tmpl deleted file mode 100644 index 8c32d4eca8e3..000000000000 --- a/mmv1/third_party/terraform/tpgresource/tpgtools_custom_flattens.go.tmpl +++ /dev/null @@ -1,39 +0,0 @@ -package tpgresource - -import ( - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws{{ $.DCLVersion }}" - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure{{ $.DCLVersion }}" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" -) - -func FlattenContainerAwsNodePoolManagement(obj *containeraws.NodePoolManagement, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if obj == nil { - return nil - } - transformed := make(map[string]interface{}) - - if obj.AutoRepair == nil || obj.Empty() { - transformed["auto_repair"] = false - } else { - transformed["auto_repair"] = obj.AutoRepair - } - - return []interface{}{transformed} -} - -func FlattenContainerAzureNodePoolManagement(obj *containerazure.NodePoolManagement, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if obj == nil { - return nil - } - transformed := make(map[string]interface{}) - - if obj.AutoRepair == nil || obj.Empty() { - transformed["auto_repair"] = false - } else { - transformed["auto_repair"] = obj.AutoRepair - } - - return []interface{}{transformed} -} diff --git a/mmv1/third_party/terraform/website/docs/r/apikeys_key.html.markdown b/mmv1/third_party/terraform/website/docs/r/apikeys_key.html.markdown new file mode 100644 index 000000000000..261ce7030cf0 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/apikeys_key.html.markdown @@ -0,0 +1,274 @@ +--- +subcategory: "Apikeys" +description: |- + The Apikeys Key 
resource +--- + +# google_apikeys_key + +The Apikeys Key resource + +## Example Usage - android_key +A basic example of a android api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "com.example.app123" + sha1_fingerprint = "1699466a142d4682a5f91b50fdf400f2358e2b0b" + } + } + + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + } +} + + +``` +## Example Usage - basic_key +A basic example of a api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + browser_key_restrictions { + allowed_referrers = [".*"] + } + } +} + + +``` +## Example Usage - ios_key +A basic example of a ios api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + ios_key_restrictions { + allowed_bundle_ids = ["com.google.app.macos"] + } + } +} + + +``` +## Example Usage - minimal_key +A minimal example of a api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" +} + + +``` +## Example Usage - server_key +A basic example of a server api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + server_key_restrictions { + allowed_ips = ["127.0.0.1"] + } + } +} + + +``` +## Example Usage - service_account_key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + project = google_project.project.project_id + service_account_email = google_service_account.key_service_account.email 
+} + +resource "google_project" "project" { + project_id = "app" + name = "app" + org_id = "123456789" + deletion_policy = "DELETE" +} + +resource "google_service_account" "key_service_account" { + account_id = "app" + project = google_project.project.project_id + display_name = "Test Service Account" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - + (Required) + The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`. + + + +- - - + +* `display_name` - + (Optional) + Human-readable display name of this API key. Modifiable by user. + +* `project` - + (Optional) + The project for the resource + +* `restrictions` - + (Optional) + Key restrictions. + +* `service_account_email` - + (Optional) + The email of the service account the key is bound to. If this field is specified, the key is a service account bound key and auth enabled. See [Documentation](https://cloud.google.com/docs/authentication/api-keys?#api-keys-bound-sa) for more details. + + + +The `restrictions` block supports: + +* `android_key_restrictions` - + (Optional) + The Android apps that are allowed to use the key. + +* `api_targets` - + (Optional) + A restriction for a specific service and optionally one or more specific methods. Requests are allowed if they match any of these restrictions. If no restrictions are specified, all targets are allowed. + +* `browser_key_restrictions` - + (Optional) + The HTTP referrers (websites) that are allowed to use the key. + +* `ios_key_restrictions` - + (Optional) + The iOS apps that are allowed to use the key. + +* `server_key_restrictions` - + (Optional) + The IP addresses of callers that are allowed to use the key. 
+ +The `android_key_restrictions` block supports: + +* `allowed_applications` - + (Required) + A list of Android applications that are allowed to make API calls with this key. + +The `allowed_applications` block supports: + +* `package_name` - + (Required) + The package name of the application. + +* `sha1_fingerprint` - + (Required) + The SHA1 fingerprint of the application. For example, both sha1 formats are acceptable : DA:39:A3:EE:5E:6B:4B:0D:32:55:BF:EF:95:60:18:90:AF:D8:07:09 or DA39A3EE5E6B4B0D3255BFEF95601890AFD80709. Output format is the latter. + +The `api_targets` block supports: + +* `methods` - + (Optional) + Optional. List of one or more methods that can be called. If empty, all methods for the service are allowed. A wildcard (*) can be used as the last symbol. Valid examples: `google.cloud.translate.v2.TranslateService.GetSupportedLanguage` `TranslateText` `Get*` `translate.googleapis.com.Get*` + +* `service` - + (Required) + The service for this restriction. It should be the canonical service name, for example: `translate.googleapis.com`. You can use `gcloud services list` to get a list of services that are enabled in the project. + +The `browser_key_restrictions` block supports: + +* `allowed_referrers` - + (Required) + A list of regular expressions for the referrer URLs that are allowed to make API calls with this key. + +The `ios_key_restrictions` block supports: + +* `allowed_bundle_ids` - + (Required) + A list of bundle IDs that are allowed when making API calls with this key. + +The `server_key_restrictions` block supports: + +* `allowed_ips` - + (Required) + A list of the caller IP addresses that are allowed to make API calls with this key. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/global/keys/{{name}}` + +* `key_string` - + Output only. 
An encrypted and signed value held by this key. This field can be accessed only through the `GetKeyString` method. + +* `uid` - + Output only. Unique id in UUID4 format. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Key can be imported using any of these accepted formats: +* `projects/{{project}}/locations/global/keys/{{name}}` +* `{{project}}/{{name}}` +* `{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Key using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/global/keys/{{name}}" + to = google_apikeys_key.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Key can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_apikeys_key.default projects/{{project}}/locations/global/keys/{{name}} +$ terraform import google_apikeys_key.default {{project}}/{{name}} +$ terraform import google_apikeys_key.default {{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/assured_workloads_workload.html.markdown b/mmv1/third_party/terraform/website/docs/r/assured_workloads_workload.html.markdown new file mode 100644 index 000000000000..c004c9e98c57 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/assured_workloads_workload.html.markdown @@ -0,0 +1,319 @@ +--- +subcategory: "AssuredWorkloads" +description: |- + The AssuredWorkloads Workload resource +--- + +# google_assured_workloads_workload + +The AssuredWorkloads Workload resource + +## Example Usage - basic_workload +A basic test of an assuredworkloads api +```hcl +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "FEDRAMP_MODERATE" + display_name = "{{display}}" + location = "us-west1" + organization = "123456789" + billing_account = "billingAccounts/000000-0000000-0000000-000000" + + kms_settings { + next_rotation_time = "9999-10-02T15:01:23Z" + rotation_period = "10368000s" + } + + provisioned_resources_parent = "folders/519620126891" + + resource_settings { + display_name = "{{name}}" + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "ring" + resource_type = "KEYRING" + } + + violation_notifications_enabled = true + + workload_options { + kaj_enrollment_type = "KEY_ACCESS_TRANSPARENCY_OFF" + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - sovereign_controls_workload +A Sovereign Controls test of the assuredworkloads api +```hcl +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "EU_REGIONS_AND_SUPPORT" + display_name = "display" + location = "europe-west9" + organization = 
"123456789" + billing_account = "billingAccounts/000000-0000000-0000000-000000" + enable_sovereign_controls = true + + kms_settings { + next_rotation_time = "9999-10-02T15:01:23Z" + rotation_period = "10368000s" + } + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "ring" + resource_type = "KEYRING" + } + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +``` +## Example Usage - split_billing_partner_workload +A Split billing partner test of the assuredworkloads api +```hcl +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "ASSURED_WORKLOADS_FOR_PARTNERS" + display_name = "display" + location = "europe-west8" + organization = "123456789" + billing_account = "billingAccounts/000000-0000000-0000000-000000" + partner = "SOVEREIGN_CONTROLS_BY_PSN" + + partner_permissions { + assured_workloads_monitoring = true + data_logs_viewer = true + service_access_approver = true + } + + partner_services_billing_account = "billingAccounts/01BF3F-2C6DE5-30C607" + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "ring" + resource_type = "KEYRING" + } + + violation_notifications_enabled = true + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `compliance_regime` - + (Required) + Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT, IRS_1075 + +* `display_name` - + (Required) + Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload + +* `location` - + (Required) + The location for the resource + +* `organization` - + (Required) + The organization for the resource + + + +- - - + +* `billing_account` - + (Optional) + Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. + +* `enable_sovereign_controls` - + (Optional) + Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers. + +* `kms_settings` - + (Optional) + **DEPRECATED** Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is deprecated as of Feb 28, 2022. In order to create a Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type field. + +* `labels` - + (Optional) + Optional. Labels applied to the workload. 
+ +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `partner` - + (Optional) + Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM + +* `partner_permissions` - + (Optional) + Optional. Permissions granted to the AW Partner SA account for the customer workload + +* `partner_services_billing_account` - + (Optional) + Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + +* `provisioned_resources_parent` - + (Optional) + Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} + +* `resource_settings` - + (Optional) + Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. + +* `violation_notifications_enabled` - + (Optional) + Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. 
This will always be true while creating the workload. + +* `workload_options` - + (Optional) + Optional. Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads. + + + +The `kms_settings` block supports: + +* `next_rotation_time` - + (Required) + Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary. + +* `rotation_period` - + (Required) + Required. Input only. Immutable. `next_rotation_time` will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours. + +The `partner_permissions` block supports: + +* `assured_workloads_monitoring` - + (Optional) + Optional. Allow partner to view violation alerts. + +* `data_logs_viewer` - + (Optional) + Allow the partner to view inspectability logs and monitoring violations. + +* `service_access_approver` - + (Optional) + Optional. Allow partner to view access approval logs. + +The `resource_settings` block supports: + +* `display_name` - + (Optional) + User-assigned resource display name. If not empty it will be used to create a resource with the specified name. + +* `resource_id` - + (Optional) + Resource identifier. For a project this represents projectId. If the project is already taken, the workload creation will fail. For KeyRing, this represents the keyring_id. For a folder, don't set this value as folder_id is assigned by Google. + +* `resource_type` - + (Optional) + Indicates the type of resource. 
This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER + +The `workload_options` block supports: + +* `kaj_enrollment_type` - + (Optional) + Indicates type of KAJ enrollment for the workload. Currently, only specifying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `organizations/{{organization}}/locations/{{location}}/workloads/{{name}}` + +* `compliance_status` - + Output only. Count of active Violations in the Workload. + +* `compliant_but_disallowed_services` - + Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment. + +* `create_time` - + Output only. Immutable. The Workload creation timestamp. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `ekm_provisioning_response` - + Optional. Represents the Ekm Provisioning State of the given workload. + +* `kaj_enrollment_state` - + Output only. Represents the KAJ enrollment state of the given workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING, KAJ_ENROLLMENT_STATE_COMPLETE + +* `name` - + Output only. The resource name of the workload. + +* `resources` - + Output only. 
The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. Always read only. + +* `saa_enrollment_response` - + Output only. Represents the SAA enrollment response of the given workload. SAA enrollment response is queried during workloads.get call. In failure cases, user friendly error message is shown in SAA details page. + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Workload can be imported using any of these accepted formats: +* `organizations/{{organization}}/locations/{{location}}/workloads/{{name}}` +* `{{organization}}/{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Workload using one of the formats above. For example: + + +```tf +import { + id = "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}" + to = google_assured_workloads_workload.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Workload can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_assured_workloads_workload.default organizations/{{organization}}/locations/{{location}}/workloads/{{name}} +$ terraform import google_assured_workloads_workload.default {{organization}}/{{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/clouddeploy_delivery_pipeline.html.markdown b/mmv1/third_party/terraform/website/docs/r/clouddeploy_delivery_pipeline.html.markdown new file mode 100644 index 000000000000..7cb4d99ae05c --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/clouddeploy_delivery_pipeline.html.markdown @@ -0,0 +1,599 @@ +--- +subcategory: "Cloud Deploy" +description: |- + The Cloud Deploy `DeliveryPipeline` resource +--- + +# google_clouddeploy_delivery_pipeline + +The Cloud Deploy `DeliveryPipeline` resource + +## Example Usage - canary_delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - canary_service_networking_delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { 
+ deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - canaryrun_delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations 
= { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +} + + +``` +## Example Usage - verify_delivery_pipeline +tests creating and updating a delivery pipeline with deployment verification strategy +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + Name of the `DeliveryPipeline`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`. + + + +- - - + +* `annotations` - + (Optional) + User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `description` - + (Optional) + Description of the `DeliveryPipeline`. Max length is 255 characters. 
+ +* `labels` - + (Optional) + Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + +* `serial_pipeline` - + (Optional) + SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`. + +* `suspended` - + (Optional) + When suspended, no new releases or rollouts can be created, but in-progress ones will complete. + + + +The `serial_pipeline` block supports: + +* `stages` - + (Optional) + Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow. + +The `stages` block supports: + +* `deploy_parameters` - + (Optional) + Optional. The deploy parameters to use for the target in this stage. + +* `profiles` - + (Optional) + Skaffold profiles to use when rendering the manifest for this stage's `Target`. + +* `strategy` - + (Optional) + Optional. The strategy to use for a `Rollout` to this stage. + +* `target_id` - + (Optional) + The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`. 
+ +The `deploy_parameters` block supports: + +* `match_target_labels` - + (Optional) + Optional. Deploy parameters are applied to targets with match labels. If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target). + +* `values` - + (Required) + Required. Values are deploy parameters in key-value pairs. + +The `strategy` block supports: + +* `canary` - + (Optional) + Canary deployment strategy provides progressive percentage based deployments to a Target. + +* `standard` - + (Optional) + Standard deployment strategy executes a single deploy and allows verifying the deployment. + +The `canary` block supports: + +* `canary_deployment` - + (Optional) + Configures the progressive based deployment for a Target. + +* `custom_canary_deployment` - + (Optional) + Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments. + +* `runtime_config` - + (Optional) + Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. + +The `canary_deployment` block supports: + +* `percentages` - + (Required) + Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100. + +* `postdeploy` - + (Optional) + Optional. Configuration for the postdeploy job of the last phase. If this is not configured, postdeploy job will not be present. + +* `predeploy` - + (Optional) + Optional. Configuration for the predeploy job of the first phase. If this is not configured, predeploy job will not be present. + +* `verify` - + (Optional) + Whether to run verify tests after each percentage deployment. + +The `postdeploy` block supports: + +* `actions` - + (Optional) + Optional. 
A sequence of skaffold custom actions to invoke during execution of the postdeploy job. + +The `predeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. + +The `custom_canary_deployment` block supports: + +* `phase_configs` - + (Required) + Required. Configuration for each phase in the canary deployment in the order executed. + +The `phase_configs` block supports: + +* `percentage` - + (Required) + Required. Percentage deployment for the phase. + +* `phase_id` - + (Required) + Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + +* `postdeploy` - + (Optional) + Optional. Configuration for the postdeploy job of this phase. If this is not configured, postdeploy job will not be present for this phase. + +* `predeploy` - + (Optional) + Optional. Configuration for the predeploy job of this phase. If this is not configured, predeploy job will not be present for this phase. + +* `profiles` - + (Optional) + Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage. + +* `verify` - + (Optional) + Whether to run verify tests after the deployment. + +The `postdeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job. + +The `predeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. + +The `runtime_config` block supports: + +* `cloud_run` - + (Optional) + Cloud Run runtime configuration. 
+ +* `kubernetes` - + (Optional) + Kubernetes runtime configuration. + +The `cloud_run` block supports: + +* `automatic_traffic_control` - + (Optional) + Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + +* `canary_revision_tags` - + (Optional) + Optional. A list of tags that are added to the canary revision while the canary phase is in progress. + +* `prior_revision_tags` - + (Optional) + Optional. A list of tags that are added to the prior revision while the canary phase is in progress. + +* `stable_revision_tags` - + (Optional) + Optional. A list of tags that are added to the final stable revision when the stable phase is applied. + +The `kubernetes` block supports: + +* `gateway_service_mesh` - + (Optional) + Kubernetes Gateway API service mesh configuration. + +* `service_networking` - + (Optional) + Kubernetes Service networking configuration. + +The `gateway_service_mesh` block supports: + +* `deployment` - + (Required) + Required. Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service. + +* `http_route` - + (Required) + Required. Name of the Gateway API HTTPRoute. + +* `pod_selector_label` - + (Optional) + Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources. + +* `route_destinations` - + (Optional) + Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster. + +* `route_update_wait_time` - + (Optional) + Optional. The time to wait for route updates to propagate. 
The maximum configurable time is 3 hours, in seconds format. If unspecified, there is no wait time. + +* `service` - + (Required) + Required. Name of the Kubernetes Service. + +* `stable_cutback_duration` - + (Optional) + Optional. The amount of time to migrate traffic back from the canary Service to the original Service during the stable phase deployment. If specified, must be between 15s and 3600s. If unspecified, there is no cutback time. + +The `route_destinations` block supports: + +* `destination_ids` - + (Required) + Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. Valid entries include the associated entities IDs configured in the Target resource and "@self" to include the Target cluster. + +* `propagate_service` - + (Optional) + Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitate successful DNS lookup in the route destination clusters. Can only be set to true if destinations are specified. + +The `service_networking` block supports: + +* `deployment` - + (Required) + Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service. + +* `disable_pod_overprovisioning` - + (Optional) + Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster. + +* `pod_selector_label` - + (Optional) + Optional. The label to use when selecting Pods for the Deployment resource. This label must already be present in the Deployment. + +* `service` - + (Required) + Required. Name of the Kubernetes Service. + +The `standard` block supports: + +* `postdeploy` - + (Optional) + Optional. Configuration for the postdeploy job. If this is not configured, postdeploy job will not be present. 
+ +* `predeploy` - + (Optional) + Optional. Configuration for the predeploy job. If this is not configured, predeploy job will not be present. + +* `verify` - + (Optional) + Whether to verify a deployment. + +The `postdeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job. + +The `predeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}` + +* `condition` - + Output only. Information around the state of the Delivery Pipeline. + +* `create_time` - + Output only. Time at which the pipeline was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `etag` - + This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. Unique identifier of the `DeliveryPipeline`. + +* `update_time` - + Output only. Most recent time at which the pipeline was updated. 
+ +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +DeliveryPipeline can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeliveryPipeline using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}" + to = google_clouddeploy_delivery_pipeline.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), DeliveryPipeline can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_clouddeploy_delivery_pipeline.default projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}} +$ terraform import google_clouddeploy_delivery_pipeline.default {{project}}/{{location}}/{{name}} +$ terraform import google_clouddeploy_delivery_pipeline.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/clouddeploy_target.html.markdown b/mmv1/third_party/terraform/website/docs/r/clouddeploy_target.html.markdown new file mode 100644 index 000000000000..f756bbb18144 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/clouddeploy_target.html.markdown @@ -0,0 +1,362 @@ +--- +subcategory: "Cloud Deploy" +description: |- + The Cloud Deploy `Target` resource +--- + +# google_clouddeploy_target + +The Cloud Deploy `Target` resource + +## Example Usage - multi_target +tests creating and updating a multi-target +```hcl +resource "google_clouddeploy_target" "primary" { + location = "us-west1" + name = "target" + deploy_parameters = {} + description = "multi-target description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + multi_target { + target_ids = ["1", "2"] + } + + project = "my-project-name" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - run_target +tests creating and updating a cloud run target +```hcl +resource "google_clouddeploy_target" "primary" { + location = "us-west1" + name = "target" + deploy_parameters = {} + description = "basic description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + project = "my-project-name" + require_approval = false + + run { + location = 
"projects/my-project-name/locations/us-west1" + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - target +Creates a basic Cloud Deploy target +```hcl +resource "google_clouddeploy_target" "primary" { + location = "us-west1" + name = "target" + + deploy_parameters = { + deployParameterKey = "deployParameterValue" + } + + description = "basic description" + + gke { + cluster = "projects/my-project-name/locations/us-west1/clusters/example-cluster-name" + } + + project = "my-project-name" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + Name of the `Target`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`. + + + +- - - + +* `annotations` - + (Optional) + Optional. User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `anthos_cluster` - + (Optional) + Information specifying an Anthos Cluster. + +* `associated_entities` - + (Optional) + Optional. Map of entity IDs to their associated entities. Associated entities allows specifying places other than the deployment target for specific features. 
For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + +* `custom_target` - + (Optional) + Optional. Information specifying a Custom Target. + +* `deploy_parameters` - + (Optional) + Optional. The deploy parameters to use for this target. + +* `description` - + (Optional) + Optional. Description of the `Target`. Max length is 255 characters. + +* `execution_configs` - + (Optional) + Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. When no configurations are specified, execution will use the default specified in `DefaultPool`. + +* `gke` - + (Optional) + Information specifying a GKE Cluster. + +* `labels` - + (Optional) + Optional. Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. 
+ +* `multi_target` - + (Optional) + Information specifying a multiTarget. + +* `project` - + (Optional) + The project for the resource + +* `require_approval` - + (Optional) + Optional. Whether or not the `Target` requires approval. + +* `run` - + (Optional) + Information specifying a Cloud Run deployment target. + + + +The `anthos_cluster` block supports: + +* `membership` - + (Optional) + Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + +The `associated_entities` block supports: + +* `anthos_clusters` - + (Optional) + Optional. Information specifying Anthos clusters as associated entities. + +* `entity_id` - + (Required) + The name for the key in the map for which this object is mapped to in the API + +* `gke_clusters` - + (Optional) + Optional. Information specifying GKE clusters as associated entities. + +The `anthos_clusters` block supports: + +* `membership` - + (Optional) + Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + +The `gke_clusters` block supports: + +* `cluster` - + (Optional) + Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + +* `internal_ip` - + (Optional) + Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). + +* `proxy_url` - + (Optional) + Optional. 
If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server. + +The `custom_target` block supports: + +* `custom_target_type` - + (Required) + Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + +The `execution_configs` block supports: + +* `artifact_storage` - + (Optional) + Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket ("gs://my-bucket") or a path within a bucket ("gs://my-bucket/my-dir"). If unspecified, a default bucket located in the same region will be used. + +* `execution_timeout` - + (Optional) + Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used. + +* `service_account` - + (Optional) + Optional. Google service account to use for execution. If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used. + +* `usages` - + (Required) + Required. Usages when this configuration should be applied. + +* `verbose` - + (Optional) + Optional. If true, additional logging will be enabled when running builds in this execution environment. + +* `worker_pool` - + (Optional) + Optional. The resource name of the `WorkerPool`, with the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. If this optional field is unspecified, the default Cloud Build pool will be used. + +The `gke` block supports: + +* `cluster` - + (Optional) + Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + +* `dns_endpoint` - + (Optional) + Optional. If set, the cluster will be accessed using the DNS endpoint. Note that both `dns_endpoint` and `internal_ip` cannot be set to true. + +* `internal_ip` - + (Optional) + Optional. 
If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). + +* `proxy_url` - + (Optional) + Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server. + +The `multi_target` block supports: + +* `target_ids` - + (Required) + Required. The target_ids of this multiTarget. + +The `run` block supports: + +* `location` - + (Required) + Required. The location where the Cloud Run Service should be located. Format is `projects/{project}/locations/{location}`. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/targets/{{name}}` + +* `create_time` - + Output only. Time at which the `Target` was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `etag` - + Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `target_id` - + Output only. Resource id of the `Target`. 
+ +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. Unique identifier of the `Target`. + +* `update_time` - + Output only. Most recent time at which the `Target` was updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Target can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/targets/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Target using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/targets/{{name}}" + to = google_clouddeploy_target.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Target can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_clouddeploy_target.default projects/{{project}}/locations/{{location}}/targets/{{name}} +$ terraform import google_clouddeploy_target.default {{project}}/{{location}}/{{name}} +$ terraform import google_clouddeploy_target.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_aws_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_aws_cluster.html.markdown new file mode 100644 index 000000000000..66fbf2744973 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_aws_cluster.html.markdown @@ -0,0 +1,638 @@ +--- +subcategory: "ContainerAws" +description: |- + An Anthos cluster running on AWS. +--- + +# google_container_aws_cluster + +An Anthos cluster running on AWS. + +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_aws_cluster +A basic example of a containeraws cluster +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + admin_groups { + group = "group@domain.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 
3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +``` +## Example Usage - basic_enum_aws_cluster +A basic example of a containeraws cluster with lowercase enums +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = 
"${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +``` +## Example Usage - beta_basic_enum_aws_cluster +A basic example of a containeraws cluster with lowercase enums (beta) +```hcl +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = 
"arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + + instance_placement { + tenancy = "dedicated" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `authorization` - + (Required) + Configuration related to the cluster RBAC settings. + +* `aws_region` - + (Required) + The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. You can call to list all supported AWS regions within a given Google Cloud region. + +* `control_plane` - + (Required) + Configuration related to the cluster control plane. 
+ +* `fleet` - + (Required) + Fleet configuration. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of this resource. + +* `networking` - + (Required) + Cluster-wide networking configuration. + + + +The `authorization` block supports: + +* `admin_groups` - + (Optional) + Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +* `admin_users` - + (Required) + Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +The `control_plane` block supports: + +* `aws_services_authentication` - + (Required) + Authentication configuration for management of AWS resources. + +* `config_encryption` - + (Required) + The ARN of the AWS KMS key used to encrypt cluster configuration. + +* `database_encryption` - + (Required) + The ARN of the AWS KMS key used to encrypt cluster secrets. + +* `iam_instance_profile` - + (Required) + The name of the AWS IAM instance profile to assign to each control plane replica. + +* `instance_placement` - + (Optional) + (Beta only) Details of placement information for an instance. + +* `instance_type` - + (Optional) + Optional. The AWS instance type. When unspecified, it defaults to `m5.large`. + +* `main_volume` - + (Optional) + Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. 
When unspecified, it defaults to 8 GiB with the GP2 volume type. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `root_volume` - + (Optional) + Optional. Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type. + +* `security_group_ids` - + (Optional) + Optional. The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster. + +* `ssh_config` - + (Optional) + Optional. SSH configuration for how to access the underlying control plane machines. + +* `subnet_ids` - + (Required) + The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ). + +* `tags` - + (Optional) + Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. + +* `version` - + (Required) + The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling . + +The `fleet` block supports: + +* `membership` - + The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/. + +* `project` - + (Optional) + The number of the Fleet host project where this cluster will be registered. 
+ +The `networking` block supports: + +* `per_node_pool_sg_rules_disabled` - + (Optional) + Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. Failure to do so may result in unavailable node pools. + +* `pod_address_cidr_blocks` - + (Required) + All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `service_address_cidr_blocks` - + (Required) + All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `vpc_id` - + (Required) + The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `binary_authorization` - + (Optional) + Configuration options for the Binary Authorization feature. + +* `description` - + (Optional) + Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes. 
+ +* `logging_config` - + (Optional) + (Beta only) Logging configuration. + +* `project` - + (Optional) + The project for the resource + + + +The `admin_groups` block supports: + +* `group` - + (Required) + The name of the group, e.g. `my-group@domain.com`. + +The `admin_users` block supports: + +* `username` - + (Required) + The name of the user, e.g. `my-gcp-id@gmail.com`. + +The `binary_authorization` block supports: + +* `evaluation_mode` - + (Optional) + Mode of operation for Binary Authorization policy evaluation. Possible values: DISABLED, PROJECT_SINGLETON_POLICY_ENFORCE + +The `aws_services_authentication` block supports: + +* `role_arn` - + (Required) + The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account. + +* `role_session_name` - + (Optional) + Optional. An identifier for the assumed role session. When unspecified, it defaults to `multicloud-service-agent`. + +The `config_encryption` block supports: + +* `kms_key_arn` - + (Required) + The ARN of the AWS KMS key used to encrypt cluster configuration. + +The `database_encryption` block supports: + +* `kms_key_arn` - + (Required) + The ARN of the AWS KMS key used to encrypt cluster secrets. + +The `instance_placement` block supports: + +* `tenancy` - + (Optional) + The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST + +The `main_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. + +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. 
See the specific reference in the parent resource. + +* `throughput` - + (Optional) + Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will default to 125. + +* `volume_type` - + (Optional) + Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 + +The `proxy_config` block supports: + +* `secret_arn` - + (Required) + The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +* `secret_version` - + (Required) + The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +The `root_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. + +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +* `throughput` - + (Optional) + Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will default to 125. + +* `volume_type` - + (Optional) + Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 + +The `ssh_config` block supports: + +* `ec2_key_pair` - + (Required) + The name of the EC2 key pair used to login into cluster machines. + +The `logging_config` block supports: + +* `component_config` - + (Optional) + Configuration of the logging components. 
+ +The `component_config` block supports: + +* `enable_components` - + (Optional) + Components of the logging configuration to be enabled. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/awsClusters/{{name}}` + +* `create_time` - + Output only. The time at which this cluster was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `endpoint` - + Output only. The endpoint of the cluster's API server. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the cluster. + +* `state` - + Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the cluster. + +* `update_time` - + Output only. The time at which this cluster was last updated. + +* `workload_identity_config` - + Output only. Workload Identity settings. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + +Cluster can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/awsClusters/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cluster using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}" + to = google_container_aws_cluster.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Cluster can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_aws_cluster.default projects/{{project}}/locations/{{location}}/awsClusters/{{name}} +$ terraform import google_container_aws_cluster.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_aws_cluster.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_aws_node_pool.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_aws_node_pool.html.markdown new file mode 100644 index 000000000000..986f1f4b164d --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_aws_node_pool.html.markdown @@ -0,0 +1,822 @@ +--- +subcategory: "ContainerAws" +description: |- + An Anthos node pool running on AWS. +--- + +# google_container_aws_node_pool + +An Anthos node pool running on AWS. 
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_aws_cluster +A basic example of a containeraws node pool +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = 
["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "subnet-00000000000000000" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + management { + auto_repair = true + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + project = "my-project-name" +} + +``` +## Example Usage - basic_enum_aws_cluster +A basic example of a containeraws node pool with lowercase enums +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" 
"primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + 
config { + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["sg-00000000000000000"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "subnet-00000000000000000" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "my-project-name" +} + + +``` +## Example Usage - beta_basic_enum_aws_cluster +A basic example of a containeraws node pool with lowercase enums (beta) +```hcl +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = 
"arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + 
security_group_ids = ["sg-00000000000000000"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "dedicated" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "subnet-00000000000000000" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `autoscaling` - + (Required) + Autoscaler configuration for this node pool. + +* `cluster` - + (Required) + The awsCluster for the resource + +* `config` - + (Required) + The configuration of the node pool. + +* `location` - + (Required) + The location for the resource + +* `max_pods_constraint` - + (Required) + The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool. + +* `name` - + (Required) + The name of this resource. + +* `subnet_id` - + (Required) + The subnet where the node pool node run. + +* `version` - + (Required) + The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig. + + + +The `autoscaling` block supports: + +* `max_node_count` - + (Required) + Maximum number of nodes in the NodePool. Must be >= min_node_count. + +* `min_node_count` - + (Required) + Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. 
+ +The `config` block supports: + +* `autoscaling_metrics_collection` - + (Optional) + Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled. + +* `config_encryption` - + (Required) + The ARN of the AWS KMS key used to encrypt node pool configuration. + +* `iam_instance_profile` - + (Required) + The name of the AWS IAM role assigned to nodes in the pool. + +* `image_type` - + (Optional) + (Beta only) The OS image type to use on node pool instances. + +* `instance_placement` - + (Optional) + (Beta only) Details of placement information for an instance. + +* `instance_type` - + (Optional) + Optional. The AWS instance type. When unspecified, it defaults to `m5.large`. + +* `labels` - + (Optional) + Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `root_volume` - + (Optional) + Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type. + +* `security_group_ids` - + (Optional) + Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster. + +* `spot_config` - + (Optional) + (Beta only) Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. This field is mutually exclusive with `instance_type` + +* `ssh_config` - + (Optional) + Optional. The SSH configuration. + +* `tags` - + (Optional) + Optional. Key/value metadata to assign to each underlying AWS resource. 
Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. + +* `taints` - + (Optional) + Optional. The initial taints assigned to nodes of this node pool. + +The `max_pods_constraint` block supports: + +* `max_pods_per_node` - + (Required) + The maximum number of pods to schedule on a single node. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `kubelet_config` - + (Optional) + The kubelet configuration for the node pool. + +* `management` - + (Optional) + The Management configuration for this node pool. + +* `project` - + (Optional) + The project for the resource + +* `update_settings` - + (Optional) + Optional. Update settings control the speed and disruption of the node pool update. + + + +The `autoscaling_metrics_collection` block supports: + +* `granularity` - + (Required) + The frequency at which EC2 Auto Scaling sends aggregated data to AWS CloudWatch. The only valid value is "1Minute". + +* `metrics` - + (Optional) + The metrics to enable. For a list of valid metrics, see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html. If you specify granularity and don't specify any metrics, all metrics are enabled. 
+ +The `config_encryption` block supports: + +* `kms_key_arn` - + (Required) + The ARN of the AWS KMS key used to encrypt node pool configuration. + +The `instance_placement` block supports: + +* `tenancy` - + (Optional) + The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST + +The `proxy_config` block supports: + +* `secret_arn` - + (Required) + The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +* `secret_version` - + (Required) + The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +The `root_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. + +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +* `throughput` - + (Optional) + Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput defaults to 125. + +* `volume_type` - + (Optional) + Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 + +The `spot_config` block supports: + +* `instance_types` - + (Required) + List of AWS EC2 instance types for creating a spot node pool's nodes. The specified instance types must have the same number of CPUs and memory. 
 You can use the Amazon EC2 Instance Selector tool (https://github.com/aws/amazon-ec2-instance-selector) to choose instance types with matching CPU and memory. + +The `ssh_config` block supports: + +* `ec2_key_pair` - + (Required) + The name of the EC2 key pair used to log in to cluster machines. + +The `taints` block supports: + +* `effect` - + (Required) + The taint effect. Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE + +* `key` - + (Required) + Key for the taint. + +* `value` - + (Required) + Value for the taint. + +The `kubelet_config` block supports: + +* `cpu_cfs_quota` - + (Optional) + Whether or not to enable CPU CFS quota. Defaults to true. + +* `cpu_cfs_quota_period` - + (Optional) + Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". + +* `cpu_manager_policy` - + (Optional) + The CpuManagerPolicy to use for the node. Defaults to "none". + +* `pod_pids_limit` - + (Optional) + Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. + +The `management` block supports: + +* `auto_repair` - + (Optional) + Optional. Whether or not the nodes will be automatically repaired. + +The `update_settings` block supports: + +* `surge_settings` - + (Optional) + Optional. Settings for surge update. + +The `surge_settings` block supports: + +* `max_surge` - + (Optional) + Optional. The maximum number of nodes that can be created beyond the current size of the node pool during the update process. + +* `max_unavailable` - + (Optional) + Optional. The maximum number of nodes that can be simultaneously unavailable during the update process. A node is considered unavailable if its status is not Ready. 
+ +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}` + +* `create_time` - + Output only. The time at which this node pool was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the node pool. + +* `state` - + Output only. The lifecycle state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the node pool. + +* `update_time` - + Output only. The time at which this node pool was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +NodePool can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}` +* `{{project}}/{{location}}/{{cluster}}/{{name}}` +* `{{location}}/{{cluster}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import NodePool using one of the formats above. 
For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}" + to = google_container_aws_node_pool.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), NodePool can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_aws_node_pool.default projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}} +$ terraform import google_container_aws_node_pool.default {{project}}/{{location}}/{{cluster}}/{{name}} +$ terraform import google_container_aws_node_pool.default {{location}}/{{cluster}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_azure_client.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_azure_client.html.markdown new file mode 100644 index 000000000000..fce523a2c3e2 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_azure_client.html.markdown @@ -0,0 +1,105 @@ +--- +subcategory: "ContainerAzure" +description: |- + AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription.When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated to that cluster will authenticate to Azure services using the given client.AzureClient resources are immutable and cannot be modified upon creation.Each AzureClient resource is bound to a single Azure Active Directory Application and tenant. 
+--- + +# google_container_azure_client + +AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription.When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated to that cluster will authenticate to Azure services using the given client.AzureClient resources are immutable and cannot be modified upon creation.Each AzureClient resource is bound to a single Azure Active Directory Application and tenant. + +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_azure_client +A basic example of a containerazure azure client +```hcl +resource "google_container_azure_client" "primary" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `application_id` - + (Required) + The Azure Active Directory Application ID. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of this resource. + +* `tenant_id` - + (Required) + The Azure Active Directory Tenant ID. + + + +- - - + +* `project` - + (Optional) + The project for the resource + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClients/{{name}}` + +* `certificate` - + Output only. The PEM encoded x509 certificate. + +* `create_time` - + Output only. The time at which this resource was created. + +* `uid` - + Output only. A globally unique identifier for the client. 
+ +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Client can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/azureClients/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Client using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/azureClients/{{name}}" + to = google_container_azure_client.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Client can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_azure_client.default projects/{{project}}/locations/{{location}}/azureClients/{{name}} +$ terraform import google_container_azure_client.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_azure_client.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_azure_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_azure_cluster.html.markdown new file mode 100644 index 000000000000..091b0c590615 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_azure_cluster.html.markdown @@ -0,0 +1,425 @@ +--- +subcategory: "ContainerAzure" +description: |- + An Anthos cluster running on Azure. +--- + +# google_container_azure_cluster + +An Anthos cluster running on Azure. 
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_azure_cluster +A basic example of a containerazure azure cluster +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + admin_groups { + group = "group@domain.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = 
"/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" +} + +resource "google_container_azure_client" "basic" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + + +``` +## Example Usage - beta_basic_enum_azure_cluster +A basic example of a containerazure azure cluster with lowercase enums (beta) +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = 
"/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `authorization` - + (Required) + Configuration related to the cluster RBAC settings. + +* `azure_region` - + (Required) + The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. You can call to list all supported Azure regions within a given Google Cloud region. + +* `control_plane` - + (Required) + Configuration related to the cluster control plane. + +* `fleet` - + (Required) + Fleet configuration. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of this resource. + +* `networking` - + (Required) + Cluster-wide networking configuration. + +* `resource_group_id` - + (Required) + The ARM ID of the resource group where the cluster resources are deployed. 
For example: `/subscriptions/*/resourceGroups/*` + + + +The `authorization` block supports: + +* `admin_groups` - + (Optional) + Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +* `admin_users` - + (Required) + Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +The `control_plane` block supports: + +* `database_encryption` - + (Optional) + Optional. Configuration related to application-layer secrets encryption. + +* `main_volume` - + (Optional) + Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. When unspecified, it defaults to a 8-GiB Azure Disk. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `replica_placements` - + (Optional) + Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible. + +* `root_volume` - + (Optional) + Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk. + +* `ssh_config` - + (Required) + SSH configuration for how to access the underlying control plane machines. + +* `subnet_id` - + (Required) + The ARM ID of the subnet where the control plane VMs are deployed. 
Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`. + +* `tags` - + (Optional) + Optional. A set of tags to apply to all underlying control plane Azure resources. + +* `version` - + (Required) + The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig. + +* `vm_size` - + (Optional) + Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`. + +The `fleet` block supports: + +* `membership` - + The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/. + +* `project` - + (Optional) + The number of the Fleet host project where this cluster will be registered. + +The `networking` block supports: + +* `pod_address_cidr_blocks` - + (Required) + The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `service_address_cidr_blocks` - + (Required) + The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster. + +* `virtual_network_id` - + (Required) + The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. 
Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `azure_services_authentication` - + (Optional) + Azure authentication configuration for management of Azure resources + +* `client` - + (Optional) + Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. See Resource Names (https://cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names. + +* `description` - + (Optional) + Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes. + +* `logging_config` - + (Optional) + (Beta only) Logging configuration. + +* `project` - + (Optional) + The project for the resource + + + +The `admin_groups` block supports: + +* `group` - + (Required) + The name of the group, e.g. `my-group@domain.com`. + +The `admin_users` block supports: + +* `username` - + (Required) + The name of the user, e.g. `my-gcp-id@gmail.com`. + +The `azure_services_authentication` block supports: + +* `application_id` - + (Required) + The Azure Active Directory Application ID for Authentication configuration. 
+ +* `tenant_id` - + (Required) + The Azure Active Directory Tenant ID for Authentication configuration. + +The `database_encryption` block supports: + +* `key_id` - + (Required) + The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported. + +The `main_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +The `proxy_config` block supports: + +* `resource_group_id` - + (Required) + The ARM ID of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/` + +* `secret_id` - + (Required) + The URL of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`. + +The `replica_placements` block supports: + +* `azure_availability_zone` - + (Required) + For a given replica, the Azure availability zone where to provision the control plane VM and the ETCD disk. + +* `subnet_id` - + (Required) + For a given replica, the ARM ID of the subnet where the control plane VM is deployed. Make sure it's a subnet under the virtual network in the cluster configuration. + +The `root_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +The `ssh_config` block supports: + +* `authorized_key` - + (Required) + The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page. + +The `logging_config` block supports: + +* `component_config` - + (Optional) + Configuration of the logging components. 
+ +The `component_config` block supports: + +* `enable_components` - + (Optional) + Components of the logging configuration to be enabled. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClusters/{{name}}` + +* `create_time` - + Output only. The time at which this cluster was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `endpoint` - + Output only. The endpoint of the cluster's API server. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the cluster. + +* `state` - + Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the cluster. + +* `update_time` - + Output only. The time at which this cluster was last updated. + +* `workload_identity_config` - + Output only. Workload Identity settings. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + +Cluster can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/azureClusters/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cluster using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}" + to = google_container_azure_cluster.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Cluster can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_azure_cluster.default projects/{{project}}/locations/{{location}}/azureClusters/{{name}} +$ terraform import google_container_azure_cluster.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_azure_cluster.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_azure_node_pool.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_azure_node_pool.html.markdown new file mode 100644 index 000000000000..127c6371f325 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_azure_node_pool.html.markdown @@ -0,0 +1,319 @@ +--- +subcategory: "ContainerAzure" +description: |- + An Anthos node pool running on Azure. +--- + +# google_container_azure_node_pool + +An Anthos node pool running on Azure. 
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_azure_node_pool +A basic example of a containerazure azure node pool +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = 
"/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" +} + +resource "google_container_azure_client" "basic" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + secret_id = "https://my--dev-keyvault.vault.azure.net/secrets/my--dev-secret/0000000000000000000000000000000000" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_one = "label_one" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = 
"node-pool-name" + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-one = "value-one" + } + + management { + auto_repair = true + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `autoscaling` - + (Required) + Autoscaler configuration for this node pool. + +* `cluster` - + (Required) + The azureCluster for the resource + +* `config` - + (Required) + The node configuration of the node pool. + +* `location` - + (Required) + The location for the resource + +* `max_pods_constraint` - + (Required) + The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool. + +* `name` - + (Required) + The name of this resource. + +* `subnet_id` - + (Required) + The ARM ID of the subnet where the node pool VMs run. Make sure it's a subnet under the virtual network in the cluster configuration. + +* `version` - + (Required) + The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool. + + + +The `autoscaling` block supports: + +* `max_node_count` - + (Required) + Maximum number of nodes in the node pool. Must be >= min_node_count. + +* `min_node_count` - + (Required) + Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count. + +The `config` block supports: + +* `image_type` - + (Optional) + (Beta only) The OS image type to use on node pool instances. + +* `labels` - + (Optional) + Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `root_volume` - + (Optional) + Optional. 
Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk. + +* `ssh_config` - + (Required) + SSH configuration for how to access the node pool machines. + +* `tags` - + (Optional) + Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. + +* `vm_size` - + (Optional) + Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. When unspecified, it defaults to `Standard_DS2_v2`. + +The `max_pods_constraint` block supports: + +* `max_pods_per_node` - + (Required) + The maximum number of pods to schedule on a single node. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `azure_availability_zone` - + (Optional) + Optional. The Azure availability zone of the nodes in this nodepool. When unspecified, it defaults to `1`. + +* `management` - + (Optional) + The Management configuration for this node pool. 
+ +* `project` - + (Optional) + The project for the resource + + + +The `proxy_config` block supports: + +* `resource_group_id` - + (Required) + The ARM ID of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/` + +* `secret_id` - + (Required) + The URL of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`. + +The `root_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +The `ssh_config` block supports: + +* `authorized_key` - + (Required) + The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page. + +The `management` block supports: + +* `auto_repair` - + (Optional) + Optional. Whether or not the nodes will be automatically repaired. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}` + +* `create_time` - + Output only. The time at which this node pool was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently pending changes to the node pool. + +* `state` - + Output only. The current state of the node pool. 
Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the node pool. + +* `update_time` - + Output only. The time at which this node pool was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +NodePool can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}` +* `{{project}}/{{location}}/{{cluster}}/{{name}}` +* `{{location}}/{{cluster}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import NodePool using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}" + to = google_container_azure_node_pool.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), NodePool can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_container_azure_node_pool.default projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}} +$ terraform import google_container_azure_node_pool.default {{project}}/{{location}}/{{cluster}}/{{name}} +$ terraform import google_container_azure_node_pool.default {{location}}/{{cluster}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/dataplex_asset.html.markdown b/mmv1/third_party/terraform/website/docs/r/dataplex_asset.html.markdown new file mode 100644 index 000000000000..847fcf9d7e4c --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/dataplex_asset.html.markdown @@ -0,0 +1,270 @@ +--- +subcategory: "Dataplex" +description: |- + The Dataplex Asset resource +--- + +# google_dataplex_asset + +The Dataplex Asset resource + +## Example Usage - basic_asset +```hcl +resource "google_storage_bucket" "basic_bucket" { + name = "bucket" + location = "us-west1" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "my-project-name" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "lake" + location = "us-west1" + project = "my-project-name" +} + + +resource "google_dataplex_zone" "basic_zone" { + name = "zone" + location = "us-west1" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + project = "my-project-name" +} + + +resource "google_dataplex_asset" "primary" { + name = "asset" + location = "us-west1" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = "projects/my-project-name/buckets/bucket" + type = "STORAGE_BUCKET" + } + + labels = { + env = "foo" + my-asset = "exists" + } + + + project = "my-project-name" + depends_on = [ + 
google_storage_bucket.basic_bucket + ] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `dataplex_zone` - + (Required) + The zone for the resource + +* `discovery_spec` - + (Required) + Required. Specification of the discovery feature applied to data referenced by this asset. When this spec is left unset, the asset will use the spec set on the parent zone. + +* `lake` - + (Required) + The lake for the resource + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of the asset. + +* `resource_spec` - + (Required) + Required. Immutable. Specification of the resource that is referenced by this asset. + + + +The `discovery_spec` block supports: + +* `csv_options` - + (Optional) + Optional. Configuration for CSV data. + +* `enabled` - + (Required) + Required. Whether discovery is enabled. + +* `exclude_patterns` - + (Optional) + Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. + +* `include_patterns` - + (Optional) + Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should be considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. + +* `json_options` - + (Optional) + Optional. Configuration for Json data. + +* `schedule` - + (Optional) + Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. 
To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or TZ=${IANA_TIME_ZONE}". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". + +The `resource_spec` block supports: + +* `name` - + (Optional) + Immutable. Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}` + +* `read_access_mode` - + (Optional) + Optional. Determines how read permissions are handled for each asset and their associated tables. Only available to storage buckets assets. Possible values: DIRECT, MANAGED + +* `type` - + (Required) + Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET + +- - - + +* `description` - + (Optional) + Optional. Description of the asset. + +* `display_name` - + (Optional) + Optional. User friendly display name. + +* `labels` - + (Optional) + Optional. User defined labels for the asset. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + + + +The `csv_options` block supports: + +* `delimiter` - + (Optional) + Optional. The delimiter being used to separate values. This defaults to ','. + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings. + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +* `header_rows` - + (Optional) + Optional. The number of rows to interpret as header rows that should be skipped when reading data rows. 
+ +The `json_options` block supports: + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean). + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}` + +* `create_time` - + Output only. The time when the asset was created. + +* `discovery_status` - + Output only. Status of the discovery feature applied to data referenced by this asset. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `resource_status` - + Output only. Status of the resource referenced by this asset. + +* `security_status` - + Output only. Status of the security policy applied to resource referenced by this asset. + +* `state` - + Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name. + +* `update_time` - + Output only. The time when the asset was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. 
+- `delete` - Default is 20 minutes. + +## Import + +Asset can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}` +* `{{project}}/{{location}}/{{lake}}/{{dataplex_zone}}/{{name}}` +* `{{location}}/{{lake}}/{{dataplex_zone}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Asset using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}" + to = google_dataplex_asset.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Asset can be imported using one of the formats above. For example: + +``` +$ terraform import google_dataplex_asset.default projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}} +$ terraform import google_dataplex_asset.default {{project}}/{{location}}/{{lake}}/{{dataplex_zone}}/{{name}} +$ terraform import google_dataplex_asset.default {{location}}/{{lake}}/{{dataplex_zone}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/dataplex_lake.html.markdown b/mmv1/third_party/terraform/website/docs/r/dataplex_lake.html.markdown new file mode 100644 index 000000000000..aa9140250af2 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/dataplex_lake.html.markdown @@ -0,0 +1,144 @@ +--- +subcategory: "Dataplex" +description: |- + The Dataplex Lake resource +--- + +# google_dataplex_lake + +The Dataplex Lake resource + +## Example Usage - basic_lake +A basic example of a dataplex lake +```hcl +resource "google_dataplex_lake" "primary" { + location = "us-west1" + name = "lake" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "my-project-name" + + labels = { + my-lake = "exists" + } +} + + 
+``` + +## Argument Reference + +The following arguments are supported: + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of the lake. + + + +- - - + +* `description` - + (Optional) + Optional. Description of the lake. + +* `display_name` - + (Optional) + Optional. User friendly display name. + +* `labels` - + (Optional) + Optional. User-defined labels for the lake. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `metastore` - + (Optional) + Optional. Settings to manage lake and Dataproc Metastore service instance association. + +* `project` - + (Optional) + The project for the resource + + + +The `metastore` block supports: + +* `service` - + (Optional) + Optional. A relative reference to the Dataproc Metastore (https://cloud.google.com/dataproc-metastore/docs) service associated with the lake: `projects/{project_id}/locations/{location_id}/services/{service_id}` + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/lakes/{{name}}` + +* `asset_status` - + Output only. Aggregated status of the underlying assets of the lake. + +* `create_time` - + Output only. The time when the lake was created. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `metastore_status` - + Output only. Metastore status of the lake. + +* `service_account` - + Output only. Service account associated with this lake. This service account must be authorized to access or operate on resources managed by the lake. + +* `state` - + Output only. Current state of the lake. 
Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. System generated globally unique ID for the lake. This ID will be different if the lake is deleted and re-created with the same name. + +* `update_time` - + Output only. The time when the lake was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Lake can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/lakes/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/lakes/{{name}}" + to = google_dataplex_lake.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Lake can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_dataplex_lake.default projects/{{project}}/locations/{{location}}/lakes/{{name}} +$ terraform import google_dataplex_lake.default {{project}}/{{location}}/{{name}} +$ terraform import google_dataplex_lake.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/dataplex_zone.html.markdown b/mmv1/third_party/terraform/website/docs/r/dataplex_zone.html.markdown new file mode 100644 index 000000000000..80977fc6d17b --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/dataplex_zone.html.markdown @@ -0,0 +1,224 @@ +--- +subcategory: "Dataplex" +description: |- + The Dataplex Zone resource +--- + +# google_dataplex_zone + +The Dataplex Zone resource + +## Example Usage - basic_zone +A basic example of a dataplex zone +```hcl +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "us-west1" + name = "zone" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL" + display_name = "Zone for DCL" + project = "my-project-name" + labels = {} +} + +resource "google_dataplex_lake" "basic" { + location = "us-west1" + name = "lake" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "my-project-name" + + labels = { + my-lake = "exists" + } +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `discovery_spec` - + (Required) + Required. Specification of the discovery feature applied to data in this zone. + +* `lake` - + (Required) + The lake for the resource + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of the zone. + +* `resource_spec` - + (Required) + Required. Immutable. Specification of the resources that are referenced by the assets within this zone. + +* `type` - + (Required) + Required. Immutable. The type of the zone. 
Possible values: TYPE_UNSPECIFIED, RAW, CURATED
+
+
+
+The `discovery_spec` block supports:
+
+* `csv_options` -
+  (Optional)
+  Optional. Configuration for CSV data.
+
+* `enabled` -
+  (Required)
+  Required. Whether discovery is enabled.
+
+* `exclude_patterns` -
+  (Optional)
+  Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.
+
+* `include_patterns` -
+  (Optional)
+  Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should be considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.
+
+* `json_options` -
+  (Optional)
+  Optional. Configuration for Json data.
+
+* `schedule` -
+  (Optional)
+  Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or "TZ=${IANA_TIME_ZONE}". The ${IANA_TIME_ZONE} may only be a valid string from the IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *".
+
+The `resource_spec` block supports:
+
+* `location_type` -
+  (Required)
+  Required. Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION
+
+- - -
+
+* `description` -
+  (Optional)
+  Optional. Description of the zone.
+
+* `display_name` -
+  (Optional)
+  Optional. User friendly display name.
+ +* `labels` - + (Optional) + Optional. User defined labels for the zone. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + + + +The `csv_options` block supports: + +* `delimiter` - + (Optional) + Optional. The delimiter being used to separate values. This defaults to ','. + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings. + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +* `header_rows` - + (Optional) + Optional. The number of rows to interpret as header rows that should be skipped when reading data rows. + +The `json_options` block supports: + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean). + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}` + +* `asset_status` - + Output only. Aggregated status of the underlying assets of the zone. + +* `create_time` - + Output only. The time when the zone was created. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `state` - + Output only. Current state of the zone. 
Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name. + +* `update_time` - + Output only. The time when the zone was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Zone can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}` +* `{{project}}/{{location}}/{{lake}}/{{name}}` +* `{{location}}/{{lake}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Zone using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}" + to = google_dataplex_zone.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Zone can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_dataplex_zone.default projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}} +$ terraform import google_dataplex_zone.default {{project}}/{{location}}/{{lake}}/{{name}} +$ terraform import google_dataplex_zone.default {{location}}/{{lake}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/firebaserules_release.html.markdown b/mmv1/third_party/terraform/website/docs/r/firebaserules_release.html.markdown new file mode 100644 index 000000000000..815a3e1adda8 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/firebaserules_release.html.markdown @@ -0,0 +1,171 @@ +--- +subcategory: "Firebaserules" +description: |- + +--- + +# google_firebaserules_release + + + +For more information, see: +* [Get started with Firebase Security Rules](https://firebase.google.com/docs/rules/get-started) +## Example Usage - firestore_release +Creates a Firebase Rules Release to the default Cloud Firestore instance +```hcl +resource "google_firebaserules_release" "primary" { + name = "cloud.firestore" + project = "my-project-name" + ruleset_name = "projects/my-project-name/rulesets/${google_firebaserules_ruleset.firestore.name}" +} + +resource "google_firebaserules_ruleset" "firestore" { + project = "my-project-name" + + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } +} + +``` +## Example Usage - firestore_release_additional +Creates a Firebase Rules Release to an additional Cloud Firestore instance +```hcl +resource "google_firebaserules_release" "primary" { + name = "cloud.firestore/database" + project = "my-project-name" + ruleset_name = "projects/my-project-name/rulesets/${google_firebaserules_ruleset.firestore.name}" +} + +resource "google_firebaserules_ruleset" "firestore" { + project = "my-project-name" + + source { + files { + content = 
"service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } +} + +``` +## Example Usage - storage_release +Creates a Firebase Rules Release for a Storage bucket +```hcl +resource "google_firebaserules_release" "primary" { + provider = google-beta + name = "firebase.storage/${google_storage_bucket.bucket.name}" + ruleset_name = "projects/my-project-name/rulesets/${google_firebaserules_ruleset.storage.name}" + project = "my-project-name" + + lifecycle { + replace_triggered_by = [ + google_firebaserules_ruleset.storage + ] + } +} + +# Provision a non-default Cloud Storage bucket. +resource "google_storage_bucket" "bucket" { + provider = google-beta + project = "my-project-name" + name = "bucket" + location = "us-west1" +} + +# Make the Storage bucket accessible for Firebase SDKs, authentication, and Firebase Security Rules. +resource "google_firebase_storage_bucket" "bucket" { + provider = google-beta + project = "my-project-name" + bucket_id = google_storage_bucket.bucket.name +} + +# Create a ruleset of Firebase Security Rules from a local file. +resource "google_firebaserules_ruleset" "storage" { + provider = google-beta + project = "my-project-name" + source { + files { + name = "storage.rules" + content = "service firebase.storage {match /b/{bucket}/o {match /{allPaths=**} {allow read, write: if request.auth != null;}}}" + } + } + + depends_on = [ + google_firebase_storage_bucket.bucket + ] +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - + (Required) + Format: `projects/{project_id}/releases/{release_id}`\Firestore Rules Releases will **always** have the name 'cloud.firestore' + +* `ruleset_name` - + (Required) + Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist for the `Release` to be created. 
+ + + +- - - + +* `project` - + (Optional) + The project for the resource + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/releases/{{name}}` + +* `create_time` - + Output only. Time the release was created. + +* `disabled` - + Disable the release to keep it from being served. The response code of NOT_FOUND will be given for executables generated from this Release. + +* `update_time` - + Output only. Time the release was updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Release can be imported using any of these accepted formats: +* `projects/{{project}}/releases/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Release using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/releases/{{name}}" + to = google_firebaserules_release.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Release can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_firebaserules_release.default projects/{{project}}/releases/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/firebaserules_ruleset.html.markdown b/mmv1/third_party/terraform/website/docs/r/firebaserules_ruleset.html.markdown new file mode 100644 index 000000000000..f1b3ff4a0887 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/firebaserules_ruleset.html.markdown @@ -0,0 +1,140 @@ +--- +subcategory: "Firebaserules" +description: |- + +--- + +# google_firebaserules_ruleset + + + +For more information, see: +* [Get started with Firebase Security Rules](https://firebase.google.com/docs/rules/get-started) +## Example Usage - basic_ruleset +Creates a basic Firestore ruleset +```hcl +resource "google_firebaserules_ruleset" "primary" { + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + fingerprint = "" + } + + language = "" + } + + project = "my-project-name" +} + + +``` +## Example Usage - minimal_ruleset +Creates a minimal Firestore ruleset +```hcl +resource "google_firebaserules_ruleset" "primary" { + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `source` - + (Required) + `Source` for the `Ruleset`. + + + +The `source` block supports: + +* `files` - + (Required) + `File` set constituting the `Source` bundle. + +* `language` - + (Optional) + `Language` of the `Source` bundle. If unspecified, the language will default to `FIREBASE_RULES`. 
Possible values: LANGUAGE_UNSPECIFIED, FIREBASE_RULES, EVENT_FLOW_TRIGGERS + +- - - + +* `project` - + (Optional) + The project for the resource + + + +The `files` block supports: + +* `content` - + (Required) + Textual Content. + +* `fingerprint` - + (Optional) + Fingerprint (e.g. github sha) associated with the `File`. + +* `name` - + (Required) + File name. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/rulesets/{{name}}` + +* `create_time` - + Output only. Time the `Ruleset` was created. + +* `metadata` - + Output only. The metadata for this ruleset. + +* `name` - + Output only. Name of the `Ruleset`. The ruleset_id is auto generated by the service. Format: `projects/{project_id}/rulesets/{ruleset_id}` + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Ruleset can be imported using any of these accepted formats: +* `projects/{{project}}/rulesets/{{name}}` +* `{{project}}/{{name}}` +* `{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Ruleset using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/rulesets/{{name}}" + to = google_firebaserules_ruleset.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Ruleset can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_firebaserules_ruleset.default projects/{{project}}/rulesets/{{name}} +$ terraform import google_firebaserules_ruleset.default {{project}}/{{name}} +$ terraform import google_firebaserules_ruleset.default {{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/recaptcha_enterprise_key.html.markdown b/mmv1/third_party/terraform/website/docs/r/recaptcha_enterprise_key.html.markdown new file mode 100644 index 000000000000..9f050594d1ad --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/recaptcha_enterprise_key.html.markdown @@ -0,0 +1,319 @@ +--- +subcategory: "RecaptchaEnterprise" +description: |- + The RecaptchaEnterprise Key resource +--- + +# google_recaptcha_enterprise_key + +The RecaptchaEnterprise Key resource + +## Example Usage - android_key +A basic test of recaptcha enterprise key that can be used by Android apps +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + android_settings { + allow_all_package_names = true + allowed_package_names = [] + } + + project = "my-project-name" + + testing_options { + testing_score = 0.8 + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - ios_key +A basic test of recaptcha enterprise key that can be used by iOS apps +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + ios_settings { + allow_all_bundle_ids = true + allowed_bundle_ids = [] + } + + project = "my-project-name" + + testing_options { + testing_score = 1 + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - minimal_key +A minimal test of recaptcha enterprise key +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + } + + labels = {} +} + + +``` +## Example Usage - waf_key +A basic 
test of recaptcha enterprise key that includes WAF settings +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + waf_settings { + waf_feature = "CHALLENGE_PAGE" + waf_service = "CA" + } + + web_settings { + integration_type = "INVISIBLE" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - web_key +A basic test of recaptcha enterprise key that can be used by websites +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + web_settings { + integration_type = "CHECKBOX" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - web_score_key +A basic test of recaptcha enterprise key with score integration type that can be used by websites +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + testing_options { + testing_score = 0.5 + } + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + allow_amp_traffic = false + allowed_domains = [] + } + + labels = { + label-one = "value-one" + } +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `display_name` - + (Required) + Human-readable display name of this key. Modifiable by user. + + + +- - - + +* `android_settings` - + (Optional) + Settings for keys that can be used by Android apps. + +* `ios_settings` - + (Optional) + Settings for keys that can be used by iOS apps. 
+ +* `labels` - + (Optional) + See [Creating and managing labels](https://cloud.google.com/recaptcha-enterprise/docs/labels). + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + +* `testing_options` - + (Optional) + Options for user acceptance testing. + +* `waf_settings` - + (Optional) + Settings specific to keys that can be used for WAF (Web Application Firewall). + +* `web_settings` - + (Optional) + Settings for keys that can be used by websites. + + + +The `android_settings` block supports: + +* `allow_all_package_names` - + (Optional) + If set to true, it means allowed_package_names will not be enforced. + +* `allowed_package_names` - + (Optional) + Android package names of apps allowed to use the key. Example: 'com.companyname.appname' + +The `ios_settings` block supports: + +* `allow_all_bundle_ids` - + (Optional) + If set to true, it means allowed_bundle_ids will not be enforced. + +* `allowed_bundle_ids` - + (Optional) + iOS bundle ids of apps allowed to use the key. Example: 'com.companyname.productname.appname' + +The `testing_options` block supports: + +* `testing_challenge` - + (Optional) + For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE + +* `testing_score` - + (Optional) + All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive. + +The `waf_settings` block supports: + +* `waf_feature` - + (Required) + Supported WAF features. For more information, see https://cloud.google.com/recaptcha-enterprise/docs/usecase#comparison_of_features. 
Possible values: CHALLENGE_PAGE, SESSION_TOKEN, ACTION_TOKEN, EXPRESS + +* `waf_service` - + (Required) + The WAF service that uses this key. Possible values: CA, FASTLY + +The `web_settings` block supports: + +* `allow_all_domains` - + (Optional) + If set to true, it means allowed_domains will not be enforced. + +* `allow_amp_traffic` - + (Optional) + If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type. + +* `allowed_domains` - + (Optional) + Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com' + +* `challenge_security_preference` - + (Optional) + Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY + +* `integration_type` - + (Required) + Required. Describes how this key is integrated with the website. Possible values: SCORE, CHECKBOX, INVISIBLE + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/keys/{{name}}` + +* `create_time` - + The timestamp corresponding to the creation of this Key. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `name` - + The resource id for the Key, which is the same as the Site Key itself. + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. 
+ +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Key can be imported using any of these accepted formats: +* `projects/{{project}}/keys/{{name}}` +* `{{project}}/{{name}}` +* `{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Key using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/keys/{{name}}" + to = google_recaptcha_enterprise_key.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Key can be imported using one of the formats above. For example: + +``` +$ terraform import google_recaptcha_enterprise_key.default projects/{{project}}/keys/{{name}} +$ terraform import google_recaptcha_enterprise_key.default {{project}}/{{name}} +$ terraform import google_recaptcha_enterprise_key.default {{name}} +``` + + +