From 073510daf144424c40452eeec5e74077ef22b1f5 Mon Sep 17 00:00:00 2001 From: Daniel Isen Date: Wed, 4 Dec 2024 17:07:45 -0500 Subject: [PATCH] [CLOUDGA-25006] Deprecate root node info --- .gitignore | 2 + docs/data-sources/cluster.md | 2 +- docs/resources/cluster.md | 27 ++-- managed/data_source_cluster_name.go | 1 + managed/resource_cluster.go | 239 ++++++++++++++-------------- 5 files changed, 138 insertions(+), 133 deletions(-) diff --git a/.gitignore b/.gitignore index a4bc69a..7eddec9 100644 --- a/.gitignore +++ b/.gitignore @@ -66,3 +66,5 @@ terraform # Ignore Intellij files. .idea/ + +terraform-provider-ybm diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index 9729cd1..ac3c403 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -47,7 +47,7 @@ data "ybm_cluster" "example_cluster" { - `desired_state` (String) The desired state of the database, Active or Paused. This parameter can be used to pause/resume a cluster. - `endpoints` (Attributes List) The endpoints used to connect to the cluster. (see [below for nested schema](#nestedatt--endpoints)) - `fault_tolerance` (String) The fault tolerance of the cluster. -- `node_config` (Attributes) (see [below for nested schema](#nestedatt--node_config)) +- `node_config` (Attributes, Deprecated) (see [below for nested schema](#nestedatt--node_config)) - `num_faults_to_tolerate` (Number) The number of domain faults the cluster can tolerate. - `project_id` (String) The ID of the project this cluster belongs to. - `restore_backup_id` (String) The ID of the backup to be restored to the cluster. diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index 18d86b2..4e67061 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -608,7 +608,6 @@ resource "ybm_private_service_endpoint" "npsenonok-region" { - `credentials` (Attributes) Credentials to be used by the database. 
Please provide 'username' and 'password' (which would be used in common for both YSQL and YCQL) OR all of 'ysql_username', 'ysql_password', 'ycql_username' and 'ycql_password' but not a mix of both. (see [below for nested schema](#nestedatt--credentials)) -- `node_config` (Attributes) (see [below for nested schema](#nestedatt--node_config)) ### Optional @@ -619,6 +618,7 @@ resource "ybm_private_service_endpoint" "npsenonok-region" { - `database_track` (String) The track of the database. Production or Innovation or Preview. - `desired_state` (String) The desired state of the database, Active or Paused. This parameter can be used to pause/resume a cluster. - `fault_tolerance` (String) The fault tolerance of the cluster. NONE, NODE, ZONE or REGION. +- `node_config` (Attributes, Deprecated) (see [below for nested schema](#nestedatt--node_config)) - `num_faults_to_tolerate` (Number) The number of domain faults the cluster can tolerate. 0 for NONE, 1 for ZONE and [1-3] for NODE and REGION - `restore_backup_id` (String) The ID of the backup to be restored to the cluster. @@ -638,6 +638,7 @@ resource "ybm_private_service_endpoint" "npsenonok-region" { Required: +- `num_cores` (Number) Number of CPU cores in the nodes of the region. - `num_nodes` (Number) - `region` (String) @@ -647,7 +648,6 @@ Optional: - `disk_size_gb` (Number) Disk size of the nodes of the region. - `is_default` (Boolean) - `is_preferred` (Boolean) -- `num_cores` (Number) Number of CPU cores in the nodes of the region. - `public_access` (Boolean) - `vpc_id` (String) - `vpc_name` (String) @@ -666,19 +666,6 @@ Optional: - `ysql_username` (String) YSQL username for the database. - -### Nested Schema for `node_config` - -Required: - -- `num_cores` (Number) Number of CPU cores in the node. - -Optional: - -- `disk_iops` (Number) Disk IOPS of the node. -- `disk_size_gb` (Number) Disk size of the node. 
- - ### Nested Schema for `backup_schedules` @@ -763,6 +750,16 @@ Optional: + +### Nested Schema for `node_config` + +Optional: + +- `disk_iops` (Number) Disk IOPS of the node. +- `disk_size_gb` (Number) Disk size of the node. +- `num_cores` (Number) Number of CPU cores in the node. + + ### Nested Schema for `cluster_info` diff --git a/managed/data_source_cluster_name.go b/managed/data_source_cluster_name.go index b895362..5832fd8 100644 --- a/managed/data_source_cluster_name.go +++ b/managed/data_source_cluster_name.go @@ -336,6 +336,7 @@ func (r dataClusterNameType) GetSchema(_ context.Context) (tfsdk.Schema, diag.Di Computed: true, }, }), + DeprecationMessage: "Remove reliance on the attribute as it will be removed in the next major version of the provider. Please use cluster_region_info to read node config instead.", }, "credentials": { Computed: true, diff --git a/managed/resource_cluster.go b/managed/resource_cluster.go index 2c2f5df..8de3102 100644 --- a/managed/resource_cluster.go +++ b/managed/resource_cluster.go @@ -24,7 +24,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - retry "github.com/sethvargo/go-retry" + "github.com/sethvargo/go-retry" "github.com/yugabyte/terraform-provider-ybm/managed/fflags" "github.com/yugabyte/terraform-provider-ybm/managed/util" openapiclient "github.com/yugabyte/yugabytedb-managed-go-client-internal" @@ -85,8 +85,7 @@ func (r resourceClusterType) GetSchema(_ context.Context) (tfsdk.Schema, diag.Di "num_cores": { Description: "Number of CPU cores in the nodes of the region.", Type: types.Int64Type, - Optional: true, - Computed: true, + Required: true, }, "disk_size_gb": { Description: "Disk size of the nodes of the region.", @@ -373,12 +372,14 @@ func (r resourceClusterType) GetSchema(_ context.Context) (tfsdk.Schema, diag.Di Optional: true, }, "node_config": { - Required: true, + Optional: true, + 
Computed: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ "num_cores": { Description: "Number of CPU cores in the node.", Type: types.Int64Type, - Required: true, + Optional: true, + Computed: true, }, "disk_size_gb": { Description: "Disk size of the node.", @@ -393,6 +394,7 @@ func (r resourceClusterType) GetSchema(_ context.Context) (tfsdk.Schema, diag.Di Optional: true, }, }), + DeprecationMessage: "Remove this attribute's configuration as it's no longer in use and the attribute will be removed in the next major version of the provider. Please use cluster_region_info to specify node config instead.", }, "credentials": { Description: `Credentials to be used by the database. Please provide 'username' and 'password' @@ -577,7 +579,7 @@ func editBackupScheduleV2(ctx context.Context, backupScheduleStruct BackupSchedu backupScheduleSpec.UnsetTimeIntervalInDays() } if backupScheduleStruct.TimeIntervalInDays.Value != 0 && backupScheduleStruct.CronExpression.Value != "" { - return errors.New("Unable to create custom backup schedule. You can't pass both the cron expression and time interval in days.") + return errors.New("unable to create custom backup schedule. 
You can't pass both the cron expression and time interval in days") } _, res, err := apiClient.BackupApi.ModifyBackupScheduleV2(ctx, accountId, projectId, clusterId, scheduleId).ScheduleSpecV2(backupScheduleSpec).Execute() @@ -611,7 +613,7 @@ func createClusterSpec(ctx context.Context, apiClient *openapiclient.APIClient, softwareInfo.SetTrackId(trackId) } - clusterRegionInfo := []openapiclient.ClusterRegionInfo{} + var clusterRegionInfo []openapiclient.ClusterRegionInfo totalNodes := 0 clusterType := plan.ClusterType.Value isDefaultSet := false @@ -633,41 +635,37 @@ func createClusterSpec(ctx context.Context, apiClient *openapiclient.APIClient, regionInfo.VPCID.Value = vpcData.Info.Id } - if !regionInfo.NumCores.IsUnknown() && !regionInfo.NumCores.IsNull() { - cloud := plan.CloudType.Value - tier := plan.ClusterTier.Value - region := regionInfo.Region.Value - numCores := int32(regionInfo.NumCores.Value) - memoryMb, memoryOK, message = getMemoryFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) - if !memoryOK { - return nil, false, message - } - - if !regionInfo.DiskSizeGb.IsUnknown() && !regionInfo.DiskSizeGb.IsNull() { - diskSizeGb = int32(regionInfo.DiskSizeGb.Value) - } else { - diskSizeGb, diskSizeOK, message = getDiskSizeFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) - if !diskSizeOK { - return nil, false, message - } - } + cloud := plan.CloudType.Value + tier := plan.ClusterTier.Value + region := regionInfo.Region.Value + numCores := int32(regionInfo.NumCores.Value) + memoryMb, memoryOK, message = getMemoryFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) + if !memoryOK { + return nil, false, message + } - nodeInfo := *openapiclient.NewOptionalClusterNodeInfo(numCores, memoryMb, diskSizeGb) - if !regionInfo.DiskIops.IsUnknown() && !regionInfo.DiskIops.IsNull() { - nodeInfo.SetDiskIops(int32(regionInfo.DiskIops.Value)) + if !regionInfo.DiskSizeGb.IsUnknown() && 
!regionInfo.DiskSizeGb.IsNull() { + diskSizeGb = int32(regionInfo.DiskSizeGb.Value) + } else { + diskSizeGb, diskSizeOK, message = getDiskSizeFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) + if !diskSizeOK { + return nil, false, message } + } - info.SetNodeInfo(nodeInfo) - } else { - info.UnsetNodeInfo() + nodeInfo := *openapiclient.NewOptionalClusterNodeInfo(numCores, memoryMb, diskSizeGb) + if !regionInfo.DiskIops.IsUnknown() && !regionInfo.DiskIops.IsNull() { + nodeInfo.SetDiskIops(int32(regionInfo.DiskIops.Value)) } + info.SetNodeInfo(nodeInfo) + // Create an array of AccessibilityType and populate it according to // the following logic: // if the cluster is in a private VPC, it MUST always have PRIVATE. // if the cluster is NOT in a private VPC, it MUST always have PUBLIC. // if the cluster is in a private VPC and customer wants public access, it MUST have PRIVATE and PUBLIC. - accessibilityTypes := []openapiclient.AccessibilityType{} + var accessibilityTypes []openapiclient.AccessibilityType if vpcID := regionInfo.VPCID.Value; vpcID != "" { info.PlacementInfo.SetVpcId(vpcID) @@ -679,7 +677,7 @@ func createClusterSpec(ctx context.Context, apiClient *openapiclient.APIClient, } else { accessibilityTypes = append(accessibilityTypes, openapiclient.ACCESSIBILITYTYPE_PUBLIC) - // If the value is specified and it is false, then it is an error because the user + // If the value is specified, and it is false, then it is an error because the user // wants disabled public access on a non-dedicated VPC cluster. if !regionInfo.PublicAccess.IsUnknown() && !regionInfo.PublicAccess.Value { tflog.Debug(ctx, fmt.Sprintf("Cluster %v is in a public VPC and public access is disabled. 
", plan.ClusterName.Value)) @@ -711,30 +709,13 @@ func createClusterSpec(ctx context.Context, apiClient *openapiclient.APIClient, // This is to pass in the region information to fetch memory and disk size region := "" regionCount := len(clusterRegionInfo) - if regionCount > 0 { - region = clusterRegionInfo[0].PlacementInfo.CloudInfo.Region - if regionCount == 1 { - clusterRegionInfo[0].SetIsDefault(true) - } + region = clusterRegionInfo[0].PlacementInfo.CloudInfo.Region + if regionCount == 1 { + clusterRegionInfo[0].SetIsDefault(true) } cloud := plan.CloudType.Value tier := plan.ClusterTier.Value - numCores := int32(plan.NodeConfig.NumCores.Value) - memoryMb, memoryOK, message = getMemoryFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) - if !memoryOK { - return nil, false, message - } - - // Computing the default disk size if it is not provided - if !plan.NodeConfig.DiskSizeGb.IsUnknown() { - diskSizeGb = int32(plan.NodeConfig.DiskSizeGb.Value) - } else { - diskSizeGb, diskSizeOK, message = getDiskSizeFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) - if !diskSizeOK { - return nil, false, message - } - } // This is to support a redundant value in the API. // Needs to be removed once API cleans it up. 
@@ -750,12 +731,32 @@ func createClusterSpec(ctx context.Context, apiClient *openapiclient.APIClient, isProduction, ) - nodeInfo := *openapiclient.NewClusterNodeInfo(numCores, memoryMb, diskSizeGb) - if !plan.NodeConfig.DiskIops.IsUnknown() { - nodeInfo.SetDiskIops(int32(plan.NodeConfig.DiskIops.Value)) - } + if !plan.NodeConfig.NumCores.IsNull() && !plan.NodeConfig.NumCores.IsUnknown() { + numCores := int32(plan.NodeConfig.NumCores.Value) + memoryMb, memoryOK, message = getMemoryFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) + if !memoryOK { + return nil, false, message + } + + // Computing the default disk size if it is not provided + if !plan.NodeConfig.DiskSizeGb.IsUnknown() { + diskSizeGb = int32(plan.NodeConfig.DiskSizeGb.Value) + } else { + diskSizeGb, diskSizeOK, message = getDiskSizeFromInstanceType(ctx, apiClient, accountId, cloud, tier, region, numCores) + if !diskSizeOK { + return nil, false, message + } + } - clusterInfo.SetNodeInfo(nodeInfo) + nodeInfo := *openapiclient.NewClusterNodeInfo(numCores, memoryMb, diskSizeGb) + if !plan.NodeConfig.DiskIops.IsUnknown() { + nodeInfo.SetDiskIops(int32(plan.NodeConfig.DiskIops.Value)) + } + + clusterInfo.SetNodeInfo(nodeInfo) + } else { + clusterInfo.UnsetNodeInfo() + } if !plan.NumFaultsToTolerate.IsUnknown() { clusterInfo.SetNumFaultsToTolerate(int32(plan.NumFaultsToTolerate.Value)) @@ -763,8 +764,8 @@ func createClusterSpec(ctx context.Context, apiClient *openapiclient.APIClient, clusterInfo.SetClusterType(openapiclient.ClusterType(clusterType)) if clusterExists { - cluster_version, _ := strconv.Atoi(plan.ClusterVersion.Value) - clusterInfo.SetVersion(int32(cluster_version)) + clusterVersion, _ := strconv.Atoi(plan.ClusterVersion.Value) + clusterInfo.SetVersion(int32(clusterVersion)) } clusterSpec = openapiclient.NewClusterSpec( @@ -912,7 +913,7 @@ func validateOnlyOneCMKSpec(plan *Cluster) error { } if count != 1 { - return errors.New("Invalid input. 
Only one CMK Provider out of AWS, GCP, or AZURE must be present.") + return errors.New("invalid input. Only one CMK Provider out of AWS, GCP, or AZURE must be present") } return nil @@ -929,7 +930,7 @@ func createCmkSpec(plan Cluster) (*openapiclient.CMKSpec, error) { switch cmkProvider { case "GCP": if plan.CMKSpec.GCPCMKSpec == nil { - return nil, errors.New("Provider type is GCP but GCP CMK spec is missing.") + return nil, errors.New("provider type is GCP but GCP CMK spec is missing") } gcpKeyRingName := plan.CMKSpec.GCPCMKSpec.KeyRingName.Value gcpKeyName := plan.CMKSpec.GCPCMKSpec.KeyName.Value @@ -959,7 +960,7 @@ func createCmkSpec(plan Cluster) (*openapiclient.CMKSpec, error) { cmkSpec.SetGcpCmkSpec(*gcpCmkSpec) case "AWS": if plan.CMKSpec.AWSCMKSpec == nil { - return nil, errors.New("Provider type is AWS but AWS CMK spec is missing.") + return nil, errors.New("provider type is AWS but AWS CMK spec is missing") } awsSecretKey := plan.CMKSpec.AWSCMKSpec.SecretKey.Value awsAccessKey := plan.CMKSpec.AWSCMKSpec.AccessKey.Value @@ -973,7 +974,7 @@ func createCmkSpec(plan Cluster) (*openapiclient.CMKSpec, error) { cmkSpec.SetAwsCmkSpec(*awsCmkSpec) case "AZURE": if plan.CMKSpec.AzureCMKSpec == nil { - return nil, errors.New("Provider type is AZURE but AZURE CMK spec is missing.") + return nil, errors.New("provider type is AZURE but AZURE CMK spec is missing") } azureClientId := plan.CMKSpec.AzureCMKSpec.ClientID.Value azureClientSecret := plan.CMKSpec.AzureCMKSpec.ClientSecret.Value @@ -1063,26 +1064,19 @@ func (r resourceCluster) Create(ctx context.Context, req tfsdk.CreateResourceReq return } } - numCoresPresent := false - diskSizePresent := false - diskIopsPresent := false - if !regionInfo.NumCores.Unknown && !regionInfo.NumCores.Null { - numCoresPresent = true - } - if !regionInfo.DiskSizeGb.Unknown && !regionInfo.DiskSizeGb.Null { - diskSizePresent = true - } - if !regionInfo.DiskIops.Unknown && !regionInfo.DiskIops.Null { - diskIopsPresent = true - } - if 
!numCoresPresent && (diskSizePresent || diskIopsPresent) { - resp.Diagnostics.AddError( - "Specify num_cores per region, since per-region disk_size or disk_iops are specified.", - "To specify per-region node configuration, num_cores must be provided.", - ) + if !regionInfo.DiskSizeGb.IsUnknown() && !util.IsDiskSizeValid(plan.ClusterTier.Value, regionInfo.DiskSizeGb.Value) { + resp.Diagnostics.AddError("Invalid disk size in "+regionInfo.Region.Value, "The disk size for a paid cluster must be at least 50 GB.") return } + + if !(regionInfo.DiskIops.IsUnknown() || regionInfo.DiskIops.IsNull()) { + isValid, err := util.IsDiskIopsValid(plan.CloudType.Value, plan.ClusterTier.Value, regionInfo.DiskIops.Value) + if !isValid { + resp.Diagnostics.AddError("Invalid disk IOPS in "+regionInfo.Region.Value, err) + return + } + } } projectId, getProjectOK, message := getProjectId(ctx, apiClient, accountId) @@ -1181,7 +1175,7 @@ func (r resourceCluster) Create(ctx context.Context, req tfsdk.CreateResourceReq } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("The cluster creation is in progress")) + return retry.RetryableError(errors.New("the cluster creation is in progress")) }) if err != nil { @@ -1241,7 +1235,7 @@ func (r resourceCluster) Create(ctx context.Context, req tfsdk.CreateResourceReq } - allowListIDs := []string{} + var allowListIDs []string allowListProvided := false if plan.ClusterAllowListIDs != nil { for i := range plan.ClusterAllowListIDs { @@ -1272,7 +1266,7 @@ func (r resourceCluster) Create(ctx context.Context, req tfsdk.CreateResourceReq } } - regions := []string{} + var regions []string for _, regionInfo := range plan.ClusterRegionInfo { regions = append(regions, regionInfo.Region.Value) } @@ -1292,8 +1286,8 @@ func (r resourceCluster) Create(ctx context.Context, req tfsdk.CreateResourceReq providerType := cluster.CMKSpec.ProviderType.Value switch providerType { case "AWS": - 
cluster.CMKSpec.AWSCMKSpec.SecretKey = types.String{Value: string(cmkSpec.GetAwsCmkSpec().SecretKey)} - cluster.CMKSpec.AWSCMKSpec.AccessKey = types.String{Value: string(cmkSpec.GetAwsCmkSpec().AccessKey)} + cluster.CMKSpec.AWSCMKSpec.SecretKey = types.String{Value: cmkSpec.GetAwsCmkSpec().SecretKey} + cluster.CMKSpec.AWSCMKSpec.AccessKey = types.String{Value: cmkSpec.GetAwsCmkSpec().AccessKey} case "GCP": if cmkSpec.GetGcpCmkSpec().GcpServiceAccount.IsSet() { gcpServiceAccountData := cmkSpec.GetGcpCmkSpec().GcpServiceAccount.Get() @@ -1305,7 +1299,7 @@ func (r resourceCluster) Create(ctx context.Context, req tfsdk.CreateResourceReq } } case "AZURE": - cluster.CMKSpec.AzureCMKSpec.ClientSecret = types.String{Value: string(cmkSpec.GetAzureCmkSpec().ClientSecret)} + cluster.CMKSpec.AzureCMKSpec.ClientSecret = types.String{Value: cmkSpec.GetAzureCmkSpec().ClientSecret} } } @@ -1387,11 +1381,11 @@ func pauseCluster(ctx context.Context, apiClient *openapiclient.APIClient, accou } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("The cluster is being paused.")) + return retry.RetryableError(errors.New("the cluster is being paused")) }) if err != nil { - return errors.New("Unable to pause cluster. " + "The operation timed out waiting to pause the cluster.") + return errors.New("unable to pause cluster. 
" + "The operation timed out waiting to pause the cluster") } return nil @@ -1424,11 +1418,11 @@ func enableConnectionPooling(ctx context.Context, apiClient *openapiclient.APICl } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("Connection Pooling is being enabled")) + return retry.RetryableError(errors.New("connection pooling is being enabled")) }) if err != nil { - return errors.New("Unable to enable connection pooling " + "The operation timed out waiting to enable connection pooling") + return errors.New("unable to enable connection pooling " + "The operation timed out waiting to enable connection pooling") } return nil @@ -1460,11 +1454,11 @@ func disableConnectionPooling(ctx context.Context, apiClient *openapiclient.APIC } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("Connection Pooling is being disabled")) + return retry.RetryableError(errors.New("connection pooling is being disabled")) }) if err != nil { - return errors.New("Unable to disable connection pooling " + "The operation timed out waiting to disable connection pooling") + return errors.New("unable to disable connection pooling " + "The operation timed out waiting to disable connection pooling") } return nil @@ -1493,11 +1487,11 @@ func editClusterCmk(ctx context.Context, apiClient *openapiclient.APIClient, acc } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("Cluster CMK is getting updated.")) + return retry.RetryableError(errors.New("cluster CMK is getting updated")) }) if err != nil { - return errors.New("Unable to edit cluster CMK. " + "The operation timed out waiting to edit CMK.") + return errors.New("unable to edit cluster CMK. 
" + "The operation timed out waiting to edit CMK") } return nil @@ -1539,11 +1533,11 @@ func resumeCluster(ctx context.Context, apiClient *openapiclient.APIClient, acco } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("The cluster is being resumed.")) + return retry.RetryableError(errors.New("the cluster is being resumed")) }) if err != nil { - return errors.New("Unable to resume cluster. " + "The operation timed out waiting to resume the cluster.") + return errors.New("unable to resume cluster. " + "The operation timed out waiting to resume the cluster") } return nil @@ -1575,7 +1569,7 @@ func (r resourceCluster) Read(ctx context.Context, req tfsdk.ReadResourceRequest var state Cluster getIDsFromState(ctx, req.State, &state) - allowListIDs := []string{} + var allowListIDs []string allowListProvided := false if state.ClusterAllowListIDs != nil { allowListProvided = true @@ -1584,7 +1578,7 @@ func (r resourceCluster) Read(ctx context.Context, req tfsdk.ReadResourceRequest } } - regions := []string{} + var regions []string for _, regionInfo := range state.ClusterRegionInfo { regions = append(regions, regionInfo.Region.Value) } @@ -1611,7 +1605,7 @@ func (r resourceCluster) Read(ctx context.Context, req tfsdk.ReadResourceRequest cluster.CMKSpec.GCPCMKSpec.GcpServiceAccount.ClientId.Value = cmkSpec.GCPCMKSpec.GcpServiceAccount.ClientId.Value cluster.CMKSpec.GCPCMKSpec.GcpServiceAccount.PrivateKey.Value = cmkSpec.GCPCMKSpec.GcpServiceAccount.PrivateKey.Value case "AZURE": - cluster.CMKSpec.AzureCMKSpec.ClientSecret = types.String{Value: string(cmkSpec.AzureCMKSpec.ClientSecret.Value)} + cluster.CMKSpec.AzureCMKSpec.ClientSecret = types.String{Value: cmkSpec.AzureCMKSpec.ClientSecret.Value} } } @@ -1705,7 +1699,7 @@ func resourceClusterRead(ctx context.Context, accountId string, projectId string cmkDataSpec := cmkResp.GetData().Spec.Get() cmkSpec.ProviderType = types.String{Value: 
string(cmkDataSpec.GetProviderType())} - cmkSpec.IsEnabled = types.Bool{Value: bool(cmkDataSpec.GetIsEnabled())} + cmkSpec.IsEnabled = types.Bool{Value: cmkDataSpec.GetIsEnabled()} switch cmkSpec.ProviderType.Value { case "AWS": @@ -1769,7 +1763,7 @@ func resourceClusterRead(ctx context.Context, accountId string, projectId string } } - // fill all fields of schema except credentials - credentials are not returned by api call + // fill with all fields of schema except credentials - credentials are not returned by api call cluster.AccountID.Value = accountId cluster.ProjectID.Value = projectId cluster.ClusterID.Value = clusterId @@ -1897,9 +1891,7 @@ func resourceClusterRead(ctx context.Context, accountId string, projectId string } } cluster.ClusterRegionInfo = clusterRegionInfo - if len(respClusterRegionInfo) > 0 { - cluster.CloudType.Value = string(respClusterRegionInfo[0].PlacementInfo.CloudInfo.GetCode()) - } + cluster.CloudType.Value = string(respClusterRegionInfo[0].PlacementInfo.CloudInfo.GetCode()) if allowListProvided { for { @@ -1909,8 +1901,8 @@ func resourceClusterRead(ctx context.Context, accountId string, projectId string return cluster, false, errMsg } allowListIDMap := map[string]bool{} - allowListIDs := []types.String{} - allowListStrings := []string{} + var allowListIDs []types.String + var allowListStrings []string // This is being to done to preserve order in the list since an order mismatch is treated as state mismatch by Terraform for _, elem := range clusterAllowListMappingResp.Data { allowListIDMap[elem.Info.Id] = true @@ -1978,11 +1970,11 @@ func handleRestore(ctx context.Context, accountId string, projectId string, clus } else { return retry.RetryableError(errors.New("Unable to get restore state: " + message)) } - return retry.RetryableError(errors.New("The backup restore is in progress")) + return retry.RetryableError(errors.New("the backup restore is in progress")) }) if err != nil { - return errors.New("Unable to restore backup to the 
cluster: The operation timed out waiting for backup restore.") + return errors.New("unable to restore backup to the cluster: The operation timed out waiting for backup restore") } return nil @@ -2056,6 +2048,19 @@ func (r resourceCluster) Update(ctx context.Context, req tfsdk.UpdateResourceReq return } } + + if !regionInfo.DiskSizeGb.IsUnknown() && !util.IsDiskSizeValid(plan.ClusterTier.Value, regionInfo.DiskSizeGb.Value) { + resp.Diagnostics.AddError("Invalid disk size in "+regionInfo.Region.Value, "The disk size for a paid cluster must be at least 50 GB.") + return + } + + if !(regionInfo.DiskIops.IsUnknown() || regionInfo.DiskIops.IsNull()) { + isValid, err := util.IsDiskIopsValid(plan.CloudType.Value, plan.ClusterTier.Value, regionInfo.DiskIops.Value) + if !isValid { + resp.Diagnostics.AddError("Invalid disk IOPS in "+regionInfo.Region.Value, err) + return + } + } } scheduleId := "" @@ -2124,7 +2129,7 @@ func (r resourceCluster) Update(ctx context.Context, req tfsdk.UpdateResourceReq if retries < 6 { retries++ tflog.Info(ctx, "Cluster edit task not found, retrying...") - return retry.RetryableError(errors.New("Cluster not found, retrying")) + return retry.RetryableError(errors.New("cluster not found, retrying")) } else { tflog.Info(ctx, "Cluster edit task not found, the change would not have required a task creation") return nil @@ -2135,7 +2140,7 @@ func (r resourceCluster) Update(ctx context.Context, req tfsdk.UpdateResourceReq if checkNewTaskSpawned { if asState == string(openapiclient.TASKACTIONSTATEENUM_IN_PROGRESS) { checkNewTaskSpawned = false - return retry.RetryableError(errors.New("Cluster edit operation in progress")) + return retry.RetryableError(errors.New("cluster edit operation in progress")) } else { tflog.Info(ctx, "Cluster edit task not found, the change would not have required a task creation") return nil @@ -2151,7 +2156,7 @@ func (r resourceCluster) Update(ctx context.Context, req tfsdk.UpdateResourceReq } else { return 
handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("Cluster edit operation in progress")) + return retry.RetryableError(errors.New("cluster edit operation in progress")) }) if err != nil { @@ -2176,7 +2181,7 @@ func (r resourceCluster) Update(ctx context.Context, req tfsdk.UpdateResourceReq } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("Cluster edit is in progress")) + return retry.RetryableError(errors.New("cluster edit is in progress")) }) if err != nil { @@ -2238,7 +2243,7 @@ func (r resourceCluster) Update(ctx context.Context, req tfsdk.UpdateResourceReq } } - allowListIDs := []string{} + var allowListIDs []string allowListProvided := false if plan.ClusterAllowListIDs != nil { @@ -2293,7 +2298,7 @@ func (r resourceCluster) Update(ctx context.Context, req tfsdk.UpdateResourceReq } } - regions := []string{} + var regions []string for _, regionInfo := range plan.ClusterRegionInfo { regions = append(regions, regionInfo.Region.Value) } @@ -2375,7 +2380,7 @@ func (r resourceCluster) Delete(ctx context.Context, req tfsdk.DeleteResourceReq } else { return handleReadFailureWithRetries(ctx, &readClusterRetries, 2, message) } - return retry.RetryableError(errors.New("Cluster deletion operation in progress")) + return retry.RetryableError(errors.New("cluster deletion operation in progress")) }) if err != nil { @@ -2389,7 +2394,7 @@ func (r resourceCluster) Delete(ctx context.Context, req tfsdk.DeleteResourceReq resp.State.RemoveResource(ctx) } -// Import resource +// ImportState Import resource func (r resourceCluster) ImportState(ctx context.Context, req tfsdk.ImportResourceStateRequest, resp *tfsdk.ImportResourceStateResponse) { // Save the import identifier in the id attribute tfsdk.ResourceImportStatePassthroughID(ctx, path.Root("id"), req, resp)