diff --git a/cmd/lint/main.go b/cmd/lint/main.go index 0888f7268b..72447c08af 100644 --- a/cmd/lint/main.go +++ b/cmd/lint/main.go @@ -246,6 +246,7 @@ var vocabWords = []string{ "deserializer", "deserializers", "dns", + "ecku", "elastic", "env", "eu", diff --git a/internal/kafka/command_cluster_create.go b/internal/kafka/command_cluster_create.go index e4a86804ab..58fa936de1 100644 --- a/internal/kafka/command_cluster_create.go +++ b/internal/kafka/command_cluster_create.go @@ -63,6 +63,8 @@ func (c *clusterCommand) newCreateCommand() *cobra.Command { pcmd.AddAvailabilityFlag(cmd) pcmd.AddTypeFlag(cmd) cmd.Flags().Int("cku", 0, `Number of Confluent Kafka Units (non-negative). Required for Kafka clusters of type "dedicated".`) + cmd.Flags().Int("max-ecku", 0, `Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. `+ + `Kafka clusters with "HIGH" availability must have at least two eCKUs.`) pcmd.AddByokKeyFlag(cmd, c.AuthenticatedCLICommand) pcmd.AddNetworkFlag(cmd, c.AuthenticatedCLICommand) pcmd.AddContextFlag(cmd, c.CLICommand) @@ -135,6 +137,26 @@ func (c *clusterCommand) create(cmd *cobra.Command, args []string) error { Byok: keyGlobalObjectReference, }} + if cmd.Flags().Changed("max-ecku") { + maxEcku, err := cmd.Flags().GetInt("max-ecku") + if err != nil { + return err + } + if clusterType == skuDedicated { + return errors.NewErrorWithSuggestions("the `--max-ecku` flag can only be used when creating a Basic, Standard, Enterprise, or Freight Kafka cluster", "Specify a different cluster with `--type` flag.") + } + + if clusterType == skuBasic { + createCluster.Spec.Config.CmkV2Basic.MaxEcku = cmkv2.PtrInt32(int32(maxEcku)) + } else if clusterType == skuStandard { + createCluster.Spec.Config.CmkV2Standard.MaxEcku = cmkv2.PtrInt32(int32(maxEcku)) + } else if clusterType == skuEnterprise { + createCluster.Spec.Config.CmkV2Enterprise.MaxEcku = cmkv2.PtrInt32(int32(maxEcku)) + } else if clusterType == skuFreight { + createCluster.Spec.Config.CmkV2Freight.MaxEcku = cmkv2.PtrInt32(int32(maxEcku)) + } + } + if cmd.Flags().Changed("cku") { cku, err := cmd.Flags().GetInt("cku") if err != nil { diff --git a/internal/kafka/command_cluster_describe.go b/internal/kafka/command_cluster_describe.go index cedc563a5c..00d64fbaaf 100644 --- a/internal/kafka/command_cluster_describe.go +++ b/internal/kafka/command_cluster_describe.go @@ -39,6 +39,7 @@ type describeStruct struct { ByokKeyId string `human:"BYOK Key ID" serialized:"byok_key_id"` EncryptionKeyId string `human:"Encryption Key ID" serialized:"encryption_key_id"` RestEndpoint string `human:"REST Endpoint" serialized:"rest_endpoint"` + MaxEcku int32 `human:"Max eCKU,omitempty" serialized:"max_ecku,omitempty"` TopicCount int `human:"Topic Count,omitempty" serialized:"topic_count,omitempty"` } @@ -149,6 +150,7 @@ func convertClusterToDescribeStruct(cluster *cmkv2.CmkV2Cluster, usageLimits *ka ByokKeyId: getCmkByokId(cluster), EncryptionKeyId: getCmkEncryptionKey(cluster), RestEndpoint: cluster.Spec.GetHttpEndpoint(), + MaxEcku: getCmkMaxEcku(cluster), } // Only set limits field if usage limits are available @@ -191,6 +193,13 @@ func getKafkaClusterDescribeFields(cluster *cmkv2.CmkV2Cluster, basicFields []st if cluster.Spec.Byok != nil { describeFields = append(describeFields, "ByokId") } + } else { + // Max eCKU field is available only for Basic, Standard, Enterprise, and Freight clusters + // Only show it if the value is positive (non-positive values like -1 indicate it's not set) + maxEcku := 
getCmkMaxEcku(cluster) + if maxEcku > 0 { + describeFields = append(describeFields, "MaxEcku") + } } if getTopicCount { diff --git a/internal/kafka/command_cluster_update.go b/internal/kafka/command_cluster_update.go index 7bacd82652..78eaeb50d0 100644 --- a/internal/kafka/command_cluster_update.go +++ b/internal/kafka/command_cluster_update.go @@ -32,18 +32,24 @@ func (c *clusterCommand) newUpdateCommand() *cobra.Command { Text: `Update the type of a Kafka cluster from "Basic" to "Standard":`, Code: `confluent kafka cluster update lkc-123456 --type "standard"`, }, + examples.Example{ + Text: `Update the Max eCKU count of a Kafka cluster:`, + Code: `confluent kafka cluster update lkc-123456 --max-ecku 5`, + }, ), } cmd.Flags().String("name", "", "Name of the Kafka cluster.") cmd.Flags().Uint32("cku", 0, `Number of Confluent Kafka Units. For Kafka clusters of type "dedicated" only. When shrinking a cluster, you must reduce capacity one CKU at a time.`) cmd.Flags().String("type", "", `Type of the Kafka cluster. Only supports upgrading from "Basic" to "Standard".`) + cmd.Flags().Int("max-ecku", 0, `Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. `+ + `Kafka clusters with "HIGH" availability must have at least two eCKUs.`) pcmd.AddContextFlag(cmd, c.CLICommand) pcmd.AddEnvironmentFlag(cmd, c.AuthenticatedCLICommand) pcmd.AddEndpointFlag(cmd, c.AuthenticatedCLICommand) pcmd.AddOutputFlag(cmd) - cmd.MarkFlagsOneRequired("name", "cku", "type") + cmd.MarkFlagsOneRequired("name", "cku", "type", "max-ecku") return cmd } @@ -91,7 +97,36 @@ func (c *clusterCommand) update(cmd *cobra.Command, args []string) error { update.Spec.Config = &cmkv2.CmkV2ClusterSpecUpdateConfigOneOf{CmkV2Dedicated: &cmkv2.CmkV2Dedicated{Kind: "Dedicated", Cku: updatedCku}} } - if cmd.Flags().Changed("type") { + if cmd.Flags().Changed("max-ecku") { + maxEcku, err := cmd.Flags().GetInt("max-ecku") + if err != nil { + return err + } + currentConfig := currentCluster.GetSpec().Config + if currentConfig.CmkV2Dedicated != nil { + return errors.NewErrorWithSuggestions("the `--max-ecku` flag can only be used when creating or updating a Basic, Standard, Enterprise, or Freight Kafka cluster", "Specify another cluster or use the `--cku` flag instead.") + } + if maxEcku < 1 { + return fmt.Errorf("`--max-ecku` value must be at least 1") + } + + targetType := c.getCurrentClusterType(currentConfig) + if cmd.Flags().Changed("type") { + newType, err := cmd.Flags().GetString("type") + if err != nil { + return err + } + if newType == "" { + return fmt.Errorf("`--type` flag value must not be empty") + } + if currentConfig.CmkV2Basic == nil || strings.ToLower(newType) != "standard" { + return fmt.Errorf(`clusters can only be upgraded from "Basic" to "Standard"`) + } + targetType = "Standard" + } + + update.Spec.Config = c.createClusterConfig(targetType, int32(maxEcku)) + } else if cmd.Flags().Changed("type") { newType, err := cmd.Flags().GetString("type") if err != nil { return err @@ -100,12 +135,12 @@ func (c *clusterCommand) update(cmd *cobra.Command, args []string) error { return fmt.Errorf("`--type` flag value must not be empty") } - // Validate cluster type upgrade currentConfig := currentCluster.GetSpec().Config if currentConfig.CmkV2Basic == nil || strings.ToLower(newType) != "standard" { return fmt.Errorf(`clusters can only be upgraded from "Basic" to "Standard"`) } + // When upgrading type without specifying max-ecku, use default max-ecku value for Standard cluster returned by API // Set 
the new cluster type update.Spec.Config = &cmkv2.CmkV2ClusterSpecUpdateConfigOneOf{ CmkV2Standard: &cmkv2.CmkV2Standard{ @@ -134,6 +169,59 @@ func (c *clusterCommand) update(cmd *cobra.Command, args []string) error { return c.outputKafkaClusterDescription(cmd, &updatedCluster, true, usageLimits) } +func (c *clusterCommand) getCurrentClusterType(config *cmkv2.CmkV2ClusterSpecConfigOneOf) string { + if config.CmkV2Basic != nil { + return "Basic" + } else if config.CmkV2Standard != nil { + return "Standard" + } else if config.CmkV2Enterprise != nil { + return "Enterprise" + } else if config.CmkV2Freight != nil { + return "Freight" + } + return "Basic" +} + +func (c *clusterCommand) createClusterConfig(clusterType string, maxEcku int32) *cmkv2.CmkV2ClusterSpecUpdateConfigOneOf { + switch clusterType { + case "Basic": + return &cmkv2.CmkV2ClusterSpecUpdateConfigOneOf{ + CmkV2Basic: &cmkv2.CmkV2Basic{ + Kind: "Basic", + MaxEcku: cmkv2.PtrInt32(maxEcku), + }, + } + case "Standard": + return &cmkv2.CmkV2ClusterSpecUpdateConfigOneOf{ + CmkV2Standard: &cmkv2.CmkV2Standard{ + Kind: "Standard", + MaxEcku: cmkv2.PtrInt32(maxEcku), + }, + } + case "Enterprise": + return &cmkv2.CmkV2ClusterSpecUpdateConfigOneOf{ + CmkV2Enterprise: &cmkv2.CmkV2Enterprise{ + Kind: "Enterprise", + MaxEcku: cmkv2.PtrInt32(maxEcku), + }, + } + case "Freight": + return &cmkv2.CmkV2ClusterSpecUpdateConfigOneOf{ + CmkV2Freight: &cmkv2.CmkV2Freight{ + Kind: "Freight", + MaxEcku: cmkv2.PtrInt32(maxEcku), + }, + } + default: + return &cmkv2.CmkV2ClusterSpecUpdateConfigOneOf{ + CmkV2Basic: &cmkv2.CmkV2Basic{ + Kind: "Basic", + MaxEcku: cmkv2.PtrInt32(maxEcku), + }, + } + } +} + func (c *clusterCommand) validateResize(cku int32, currentCluster *cmkv2.CmkV2Cluster) (int32, error) { // Ensure the cluster is a Dedicated Cluster if currentCluster.GetSpec().Config.CmkV2Dedicated == nil { diff --git a/internal/kafka/utils.go b/internal/kafka/utils.go index fa5e7454c9..8df2dcdd23 100644 --- a/internal/kafka/utils.go +++ b/internal/kafka/utils.go @@ -116,23 +116,6 @@ func getCmkClusterType(cluster *cmkv2.CmkV2Cluster) string { return ccstructs.Sku_name[0] // UNKNOWN } -func getCmkMaxEcku(cluster *cmkv2.CmkV2Cluster) int32 { - if isBasic(cluster) { - return cluster.Spec.Config.CmkV2Basic.GetMaxEcku() - } - if isStandard(cluster) { - return cluster.Spec.Config.CmkV2Standard.GetMaxEcku() - } - if isEnterprise(cluster) { - return cluster.Spec.Config.CmkV2Enterprise.GetMaxEcku() - } - if isFreight(cluster) { - return cluster.Spec.Config.CmkV2Freight.GetMaxEcku() - } - - return -1 -} - func getCmkClusterSize(cluster *cmkv2.CmkV2Cluster) int32 { if isDedicated(cluster) { return *cluster.Status.Cku @@ -147,6 +130,27 @@ func getCmkClusterPendingSize(cluster *cmkv2.CmkV2Cluster) int32 { return -1 } +func getCmkMaxEcku(cluster *cmkv2.CmkV2Cluster) int32 { + if isBasic(cluster) { + if cluster.GetSpec().Config.CmkV2Basic.MaxEcku != nil { + return cluster.GetSpec().Config.CmkV2Basic.GetMaxEcku() + } + } else if isStandard(cluster) { + if cluster.GetSpec().Config.CmkV2Standard.MaxEcku != nil { + return cluster.GetSpec().Config.CmkV2Standard.GetMaxEcku() + } + } else if isEnterprise(cluster) { + if cluster.GetSpec().Config.CmkV2Enterprise.MaxEcku != nil { + return cluster.GetSpec().Config.CmkV2Enterprise.GetMaxEcku() + } + } else if isFreight(cluster) { + if cluster.GetSpec().Config.CmkV2Freight.MaxEcku != nil { + return cluster.GetSpec().Config.CmkV2Freight.GetMaxEcku() + } + } + return -1 +} + func getCmkByokId(cluster *cmkv2.CmkV2Cluster) string { if 
isDedicated(cluster) && cluster.Spec.Byok != nil { return cluster.Spec.Byok.Id diff --git a/test/fixtures/output/kafka/1.golden b/test/fixtures/output/kafka/1.golden index 7510b9f245..fa1f225136 100644 --- a/test/fixtures/output/kafka/1.golden +++ b/test/fixtures/output/kafka/1.golden @@ -23,6 +23,7 @@ Flags: --availability string Specify the availability of the cluster as "single-zone", "multi-zone", "low", or "high". (default "single-zone") --type string Specify the type of the Kafka cluster as "basic", "standard", "enterprise", "freight", or "dedicated". (default "basic") --cku int Number of Confluent Kafka Units (non-negative). Required for Kafka clusters of type "dedicated". + --max-ecku int Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. --byok string Confluent Cloud Key ID of a registered encryption key (use "confluent byok create" to register a key). --network string Network ID. --context string CLI context name. diff --git a/test/fixtures/output/kafka/cluster/create-basic-with-ecku-limits.golden b/test/fixtures/output/kafka/cluster/create-basic-with-ecku-limits.golden index aac6d8378d..559718c63f 100644 --- a/test/fixtures/output/kafka/cluster/create-basic-with-ecku-limits.golden +++ b/test/fixtures/output/kafka/cluster/create-basic-with-ecku-limits.golden @@ -4,8 +4,8 @@ It may take up to 5 minutes for the Kafka cluster to be ready. | ID | lkc-with-ecku-limits | | Name | my-basic-cluster-with-ecku-limits | | Type | BASIC | -| Ingress Limit (MB/s) | 25 | -| Egress Limit (MB/s) | 75 | +| Ingress Limit (MB/s) | 250 | +| Egress Limit (MB/s) | 750 | | Storage | 5000 GB | | Cloud | aws | | Region | us-west-2 | diff --git a/test/fixtures/output/kafka/cluster/create-dedicated-max-ecku-error.golden b/test/fixtures/output/kafka/cluster/create-dedicated-max-ecku-error.golden new file mode 100644 index 0000000000..a81e8ae645 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/create-dedicated-max-ecku-error.golden @@ -0,0 +1,4 @@ +Error: the `--max-ecku` flag can only be used when creating a Basic, Standard, Enterprise, or Freight Kafka cluster + +Suggestions: + Specify a different cluster with `--type` flag. diff --git a/test/fixtures/output/kafka/cluster/create-enterprise-max-ecku.golden b/test/fixtures/output/kafka/cluster/create-enterprise-max-ecku.golden new file mode 100644 index 0000000000..ec0a3de673 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/create-enterprise-max-ecku.golden @@ -0,0 +1,17 @@ +It may take up to 5 minutes for the Kafka cluster to be ready. 
++----------------------+---------------------------+ +| Current | false | +| ID | lkc-def963 | +| Name | my-new-cluster | +| Type | ENTERPRISE | +| Ingress Limit (MB/s) | 240 | +| Egress Limit (MB/s) | 720 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-east-1 | +| Availability | multi-zone | +| Status | PROVISIONING | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | https://pkc-endpoint | +| Max eCKU | 4 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/create-flag-error.golden b/test/fixtures/output/kafka/cluster/create-flag-error.golden index aaa80c3775..aa23c33f72 100644 --- a/test/fixtures/output/kafka/cluster/create-flag-error.golden +++ b/test/fixtures/output/kafka/cluster/create-flag-error.golden @@ -1,4 +1,4 @@ -Error: at least one of the flags in the group [name cku type] is required +Error: at least one of the flags in the group [name cku type max-ecku] is required Usage: confluent kafka cluster update [flags] @@ -11,10 +11,15 @@ Update the type of a Kafka cluster from "Basic" to "Standard": $ confluent kafka cluster update lkc-123456 --type "standard" +Update the Max eCKU count of a Kafka cluster: + + $ confluent kafka cluster update lkc-123456 --max-ecku 5 + Flags: --name string Name of the Kafka cluster. --cku uint32 Number of Confluent Kafka Units. For Kafka clusters of type "dedicated" only. When shrinking a cluster, you must reduce capacity one CKU at a time. --type string Type of the Kafka cluster. Only supports upgrading from "Basic" to "Standard". + --max-ecku int Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. --context string CLI context name. --environment string Environment ID. --kafka-endpoint string Endpoint to be used for this Kafka cluster. diff --git a/test/fixtures/output/kafka/cluster/create-freight-max-ecku.golden b/test/fixtures/output/kafka/cluster/create-freight-max-ecku.golden new file mode 100644 index 0000000000..f5750a5db7 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/create-freight-max-ecku.golden @@ -0,0 +1,17 @@ +It may take up to 5 minutes for the Kafka cluster to be ready. ++----------------------+---------------------------+ +| Current | false | +| ID | lkc-def963 | +| Name | my-new-cluster | +| Type | FREIGHT | +| Ingress Limit (MB/s) | 180 | +| Egress Limit (MB/s) | 540 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-east-1 | +| Availability | low | +| Status | PROVISIONING | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | https://pkc-endpoint | +| Max eCKU | 3 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/create-help.golden b/test/fixtures/output/kafka/cluster/create-help.golden index 0bd8d6a27b..f5c8203cfd 100644 --- a/test/fixtures/output/kafka/cluster/create-help.golden +++ b/test/fixtures/output/kafka/cluster/create-help.golden @@ -26,6 +26,7 @@ Flags: --availability string Specify the availability of the cluster as "single-zone", "multi-zone", "low", or "high". (default "single-zone") --type string Specify the type of the Kafka cluster as "basic", "standard", "enterprise", "freight", or "dedicated". (default "basic") --cku int Number of Confluent Kafka Units (non-negative). Required for Kafka clusters of type "dedicated". + --max-ecku int Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. 
Kafka clusters with "HIGH" availability must have at least two eCKUs. --byok string Confluent Cloud Key ID of a registered encryption key (use "confluent byok create" to register a key). --network string Network ID. --context string CLI context name. diff --git a/test/fixtures/output/kafka/cluster/create-standard-with-ecku-limits.golden b/test/fixtures/output/kafka/cluster/create-standard-with-ecku-limits.golden index f24a75954c..7a6a62963e 100644 --- a/test/fixtures/output/kafka/cluster/create-standard-with-ecku-limits.golden +++ b/test/fixtures/output/kafka/cluster/create-standard-with-ecku-limits.golden @@ -4,8 +4,8 @@ It may take up to 5 minutes for the Kafka cluster to be ready. | ID | lkc-with-ecku-limits | | Name | my-standard-cluster-with-ecku-limits | | Type | STANDARD | -| Ingress Limit (MB/s) | 125 | -| Egress Limit (MB/s) | 375 | +| Ingress Limit (MB/s) | 250 | +| Egress Limit (MB/s) | 750 | | Storage | Unlimited | | Cloud | aws | | Region | us-west-2 | diff --git a/test/fixtures/output/kafka/cluster/describe-basic-with-ecku-limits.golden b/test/fixtures/output/kafka/cluster/describe-basic-with-ecku-limits.golden index 5503c05867..41f860ce36 100644 --- a/test/fixtures/output/kafka/cluster/describe-basic-with-ecku-limits.golden +++ b/test/fixtures/output/kafka/cluster/describe-basic-with-ecku-limits.golden @@ -12,5 +12,6 @@ | Status | UP | | Endpoint | SASL_SSL://kafka-endpoint | | REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 5 | | Topic Count | 2 | +----------------------+-------------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-basic-with-ecku-limits.golden b/test/fixtures/output/kafka/cluster/update-basic-with-ecku-limits.golden index 98028b21b4..e260479c13 100644 --- a/test/fixtures/output/kafka/cluster/update-basic-with-ecku-limits.golden +++ b/test/fixtures/output/kafka/cluster/update-basic-with-ecku-limits.golden @@ -12,5 +12,6 @@ | Status | UP | | Endpoint | SASL_SSL://kafka-endpoint | | REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 5 | | Topic Count | 2 | +----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-fail.golden b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-fail.golden new file mode 100644 index 0000000000..5b49d24753 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-fail.golden @@ -0,0 +1,4 @@ +Error: failed to update Kafka cluster: failed to update Kafka cluster: The specified Max eCKU exceeds the maximum allowed limit of 10 eCKUs for ENTERPRISE SKU + +Suggestions: + A cluster can't be updated while still provisioning. If you just created this cluster, retry in a few minutes. 
diff --git a/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-json.golden b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-json.golden new file mode 100644 index 0000000000..37a86d918a --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-json.golden @@ -0,0 +1,17 @@ +{ + "is_current": true, + "id": "lkc-update-enterprise", + "name": "", + "type": "ENTERPRISE", + "ingress_limit": 300, + "egress_limit": 900, + "storage": "Unlimited", + "cloud": "aws", + "region": "us-west-2", + "availability": "multi-zone", + "status": "UP", + "endpoint": "SASL_SSL://kafka-endpoint", + "rest_endpoint": "http://127.0.0.1:1025", + "max_ecku": 5, + "topic_count": 2 +} diff --git a/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-max.golden b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-max.golden new file mode 100644 index 0000000000..142aff7381 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-max.golden @@ -0,0 +1,18 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-enterprise | +| Name | | +| Type | ENTERPRISE | +| Ingress Limit (MB/s) | 600 | +| Egress Limit (MB/s) | 1800 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 10 | +| Topic Count | 2 | ++----------------------+---------------------------+ + diff --git a/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-min.golden b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-min.golden new file mode 100644 index 0000000000..b24ad3cfbe --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-min.golden @@ -0,0 +1,18 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-enterprise | +| Name | | +| Type | ENTERPRISE | +| Ingress Limit (MB/s) | 60 | +| Egress Limit (MB/s) | 180 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 1 | +| Topic Count | 2 | ++----------------------+---------------------------+ + diff --git a/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-with-name.golden b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-with-name.golden new file mode 100644 index 0000000000..5c9899f15d --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-with-name.golden @@ -0,0 +1,17 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-enterprise | +| Name | enterprise-cluster | +| Type | ENTERPRISE | +| Ingress Limit (MB/s) | 420 | +| Egress Limit (MB/s) | 1260 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 7 | +| Topic Count | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-yaml.golden b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-yaml.golden new file mode 100644 index 0000000000..f85c1199d4 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku-yaml.golden @@ -0,0 +1,15 @@ +is_current: true +id: 
lkc-update-enterprise +name: "" +type: ENTERPRISE +ingress_limit: 300 +egress_limit: 900 +storage: Unlimited +cloud: aws +region: us-west-2 +availability: multi-zone +status: UP +endpoint: SASL_SSL://kafka-endpoint +rest_endpoint: http://127.0.0.1:1025 +max_ecku: 5 +topic_count: 2 diff --git a/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku.golden b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku.golden new file mode 100644 index 0000000000..9de273af1d --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-enterprise-max-ecku.golden @@ -0,0 +1,17 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-enterprise | +| Name | | +| Type | ENTERPRISE | +| Ingress Limit (MB/s) | 300 | +| Egress Limit (MB/s) | 900 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 5 | +| Topic Count | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-freight-max-ecku-fail.golden b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-fail.golden new file mode 100644 index 0000000000..80da380585 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-fail.golden @@ -0,0 +1,4 @@ +Error: failed to update Kafka cluster: failed to update Kafka cluster: The specified Max eCKU exceeds the maximum allowed limit of 152 eCKUs for FREIGHT SKU + +Suggestions: + A cluster can't be updated while still provisioning. If you just created this cluster, retry in a few minutes. diff --git a/test/fixtures/output/kafka/cluster/update-freight-max-ecku-json.golden b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-json.golden new file mode 100644 index 0000000000..c94b54b835 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-json.golden @@ -0,0 +1,17 @@ +{ + "is_current": true, + "id": "lkc-update-freight", + "name": "", + "type": "FREIGHT", + "ingress_limit": 3000, + "egress_limit": 9000, + "storage": "Unlimited", + "cloud": "aws", + "region": "us-west-2", + "availability": "multi-zone", + "status": "UP", + "endpoint": "SASL_SSL://kafka-endpoint", + "rest_endpoint": "http://127.0.0.1:1025", + "max_ecku": 50, + "topic_count": 2 +} diff --git a/test/fixtures/output/kafka/cluster/update-freight-max-ecku-max.golden b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-max.golden new file mode 100644 index 0000000000..1b8a902710 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-max.golden @@ -0,0 +1,18 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-freight | +| Name | | +| Type | FREIGHT | +| Ingress Limit (MB/s) | 9120 | +| Egress Limit (MB/s) | 27360 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 152 | +| Topic Count | 2 | ++----------------------+---------------------------+ + diff --git a/test/fixtures/output/kafka/cluster/update-freight-max-ecku-min.golden b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-min.golden new file mode 100644 index 0000000000..d9b34c668a --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-min.golden @@ -0,0 +1,18 @@ 
++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-freight | +| Name | | +| Type | FREIGHT | +| Ingress Limit (MB/s) | 60 | +| Egress Limit (MB/s) | 180 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 1 | +| Topic Count | 2 | ++----------------------+---------------------------+ + diff --git a/test/fixtures/output/kafka/cluster/update-freight-max-ecku-with-name.golden b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-with-name.golden new file mode 100644 index 0000000000..6b724df33d --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-with-name.golden @@ -0,0 +1,17 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-freight | +| Name | freight-cluster | +| Type | FREIGHT | +| Ingress Limit (MB/s) | 4500 | +| Egress Limit (MB/s) | 13500 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 75 | +| Topic Count | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-freight-max-ecku-yaml.golden b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-yaml.golden new file mode 100644 index 0000000000..5a29dd7439 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-freight-max-ecku-yaml.golden @@ -0,0 +1,16 @@ +is_current: true +id: lkc-update-freight +name: "" +type: FREIGHT +ingress_limit: 3000 +egress_limit: 9000 +storage: Unlimited +cloud: aws +region: us-west-2 +availability: multi-zone +status: UP +endpoint: SASL_SSL://kafka-endpoint +rest_endpoint: http://127.0.0.1:1025 +max_ecku: 50 +topic_count: 2 + diff --git a/test/fixtures/output/kafka/cluster/update-freight-max-ecku.golden b/test/fixtures/output/kafka/cluster/update-freight-max-ecku.golden new file mode 100644 index 0000000000..7474503c02 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-freight-max-ecku.golden @@ -0,0 +1,17 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-freight | +| Name | | +| Type | FREIGHT | +| Ingress Limit (MB/s) | 3000 | +| Egress Limit (MB/s) | 9000 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | multi-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 50 | +| Topic Count | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-help.golden b/test/fixtures/output/kafka/cluster/update-help.golden index a7b8784f6e..e3ae69c2e2 100644 --- a/test/fixtures/output/kafka/cluster/update-help.golden +++ b/test/fixtures/output/kafka/cluster/update-help.golden @@ -12,10 +12,15 @@ Update the type of a Kafka cluster from "Basic" to "Standard": $ confluent kafka cluster update lkc-123456 --type "standard" +Update the Max eCKU count of a Kafka cluster: + + $ confluent kafka cluster update lkc-123456 --max-ecku 5 + Flags: --name string Name of the Kafka cluster. --cku uint32 Number of Confluent Kafka Units. For Kafka clusters of type "dedicated" only. When shrinking a cluster, you must reduce capacity one CKU at a time. --type string Type of the Kafka cluster. 
Only supports upgrading from "Basic" to "Standard". + --max-ecku int Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. --context string CLI context name. --environment string Environment ID. --kafka-endpoint string Endpoint to be used for this Kafka cluster. diff --git a/test/fixtures/output/kafka/cluster/update-max-ecku.golden b/test/fixtures/output/kafka/cluster/update-max-ecku.golden new file mode 100644 index 0000000000..e0bd2e41ce --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-max-ecku.golden @@ -0,0 +1,17 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update | +| Name | | +| Type | BASIC | +| Ingress Limit (MB/s) | 250 | +| Egress Limit (MB/s) | 750 | +| Storage | 5000 GB | +| Cloud | aws | +| Region | us-west-2 | +| Availability | single-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 4 | +| Topic Count | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-standard-max-ecku.golden b/test/fixtures/output/kafka/cluster/update-standard-max-ecku.golden new file mode 100644 index 0000000000..34d0feb165 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-standard-max-ecku.golden @@ -0,0 +1,17 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update-standard | +| Name | | +| Type | STANDARD | +| Ingress Limit (MB/s) | 250 | +| Egress Limit (MB/s) | 750 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | single-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 2 | +| Topic Count | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-type-empty-error.golden b/test/fixtures/output/kafka/cluster/update-type-empty-error.golden index 814a92f061..ac44cb2c0c 100644 --- a/test/fixtures/output/kafka/cluster/update-type-empty-error.golden +++ b/test/fixtures/output/kafka/cluster/update-type-empty-error.golden @@ -11,10 +11,15 @@ Update the type of a Kafka cluster from "Basic" to "Standard": $ confluent kafka cluster update lkc-123456 --type "standard" +Update the Max eCKU count of a Kafka cluster: + + $ confluent kafka cluster update lkc-123456 --max-ecku 5 + Flags: --name string Name of the Kafka cluster. --cku uint32 Number of Confluent Kafka Units. For Kafka clusters of type "dedicated" only. When shrinking a cluster, you must reduce capacity one CKU at a time. --type string Type of the Kafka cluster. Only supports upgrading from "Basic" to "Standard". + --max-ecku int Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. --context string CLI context name. --environment string Environment ID. --kafka-endpoint string Endpoint to be used for this Kafka cluster. 
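The help fixtures above document the new --max-ecku flag; the following is a minimal sketch of the cobra pattern the create and update commands in this patch use for it, assuming a throwaway command rather than the CLI's actual command wiring: register the flag with a zero default, and act on it only when cmd.Flags().Changed reports the user set it, so an omitted flag leaves the cluster's value untouched.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "update",
		RunE: func(cmd *cobra.Command, _ []string) error {
			// Only forward max-ecku to the request when the flag was explicitly
			// set; the zero default means "leave the cluster's value unchanged".
			if cmd.Flags().Changed("max-ecku") {
				maxEcku, err := cmd.Flags().GetInt("max-ecku")
				if err != nil {
					return err
				}
				if maxEcku < 1 {
					return fmt.Errorf("`--max-ecku` value must be at least 1")
				}
				fmt.Printf("would patch the cluster with max_ecku=%d\n", maxEcku)
			}
			return nil
		},
	}
	cmd.Flags().Int("max-ecku", 0, "Maximum number of eCKUs the cluster should auto-scale to.")

	cmd.SetArgs([]string{"--max-ecku", "5"})
	_ = cmd.Execute()
}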
diff --git a/test/fixtures/output/kafka/cluster/update-type-max-ecku-fail.golden b/test/fixtures/output/kafka/cluster/update-type-max-ecku-fail.golden new file mode 100644 index 0000000000..065d62ce5b --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-type-max-ecku-fail.golden @@ -0,0 +1,4 @@ +Error: failed to update Kafka cluster: failed to update Kafka cluster: The specified Max eCKU exceeds the maximum allowed limit of 10 eCKUs for STANDARD SKU + +Suggestions: + A cluster can't be updated while still provisioning. If you just created this cluster, retry in a few minutes. diff --git a/test/fixtures/output/kafka/cluster/update-type-max-ecku-success.golden b/test/fixtures/output/kafka/cluster/update-type-max-ecku-success.golden new file mode 100644 index 0000000000..7377231241 --- /dev/null +++ b/test/fixtures/output/kafka/cluster/update-type-max-ecku-success.golden @@ -0,0 +1,17 @@ ++----------------------+---------------------------+ +| Current | true | +| ID | lkc-update | +| Name | | +| Type | STANDARD | +| Ingress Limit (MB/s) | 250 | +| Egress Limit (MB/s) | 750 | +| Storage | Unlimited | +| Cloud | aws | +| Region | us-west-2 | +| Availability | single-zone | +| Status | UP | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 6 | +| Topic Count | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/cluster/update-type-success.golden b/test/fixtures/output/kafka/cluster/update-type-success.golden index 7b21daa1f3..b4ce5a1a87 100644 --- a/test/fixtures/output/kafka/cluster/update-type-success.golden +++ b/test/fixtures/output/kafka/cluster/update-type-success.golden @@ -12,5 +12,6 @@ | Status | UP | | Endpoint | SASL_SSL://kafka-endpoint | | REST Endpoint | http://127.0.0.1:1025 | +| Max eCKU | 10 | | Topic Count | 2 | +----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/create-basic-max-ecku-error.golden b/test/fixtures/output/kafka/create-basic-max-ecku-error.golden new file mode 100644 index 0000000000..329d9e0b65 --- /dev/null +++ b/test/fixtures/output/kafka/create-basic-max-ecku-error.golden @@ -0,0 +1,37 @@ +Error: invalid argument "abc" for "--max-ecku" flag: strconv.ParseInt: parsing "abc": invalid syntax +Usage: + confluent kafka cluster create [flags] + +Examples: +Create a new dedicated cluster that uses a customer-managed encryption key in GCP: + + $ confluent kafka cluster create sales092020 --cloud gcp --region asia-southeast1 --type dedicated --cku 1 --byok cck-a123z + +Create a new dedicated cluster that uses a customer-managed encryption key in AWS: + + $ confluent kafka cluster create my-cluster --cloud aws --region us-west-2 --type dedicated --cku 1 --byok cck-a123z + +Create a new Freight cluster that uses a customer-managed encryption key in AWS: + + $ confluent kafka cluster create my-cluster --cloud aws --region us-west-2 --type freight --cku 1 --byok cck-a123z --availability high + +For more information, see https://docs.confluent.io/current/cloud/clusters/byok-encrypted-clusters.html. + +Flags: + --cloud string Specify the cloud provider as "aws", "azure", or "gcp". + --region string Cloud region for Kafka (use "confluent kafka region list" to see all). + --availability string Specify the availability of the cluster as "single-zone", "multi-zone", "low", or "high". (default "single-zone") + --type string Specify the type of the Kafka cluster as "basic", "standard", "enterprise", "freight", or "dedicated". 
(default "basic") + --cku int Number of Confluent Kafka Units (non-negative). Required for Kafka clusters of type "dedicated". + --max-ecku int Maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. + --byok string Confluent Cloud Key ID of a registered encryption key (use "confluent byok create" to register a key). + --network string Network ID. + --context string CLI context name. + --environment string Environment ID. + -o, --output string Specify the output format as "human", "json", or "yaml". (default "human") + +Global Flags: + -h, --help Show help for this command. + --unsafe-trace Equivalent to -vvvv, but also log HTTP requests and responses which might contain plaintext secrets. + -v, --verbose count Increase verbosity (-v for warn, -vv for info, -vvv for debug, -vvvv for trace). + diff --git a/test/fixtures/output/kafka/create-basic-max-ecku.golden b/test/fixtures/output/kafka/create-basic-max-ecku.golden new file mode 100644 index 0000000000..45875acf82 --- /dev/null +++ b/test/fixtures/output/kafka/create-basic-max-ecku.golden @@ -0,0 +1,17 @@ +It may take up to 5 minutes for the Kafka cluster to be ready. ++----------------------+---------------------------+ +| Current | false | +| ID | lkc-def963 | +| Name | my-new-cluster | +| Type | BASIC | +| Ingress Limit (MB/s) | 250 | +| Egress Limit (MB/s) | 750 | +| Storage | 5000 GB | +| Cloud | aws | +| Region | us-east-1 | +| Availability | single-zone | +| Status | PROVISIONING | +| Endpoint | SASL_SSL://kafka-endpoint | +| REST Endpoint | https://pkc-endpoint | +| Max eCKU | 2 | ++----------------------+---------------------------+ diff --git a/test/fixtures/output/kafka/update-dedicated-max-ecku-error.golden b/test/fixtures/output/kafka/update-dedicated-max-ecku-error.golden new file mode 100644 index 0000000000..43c7df95c7 --- /dev/null +++ b/test/fixtures/output/kafka/update-dedicated-max-ecku-error.golden @@ -0,0 +1,4 @@ +Error: the `--max-ecku` flag can only be used when creating or updating a Basic, Standard, Enterprise, or Freight Kafka cluster + +Suggestions: + Specify another cluster or use the `--cku` flag instead. 
diff --git a/test/kafka_test.go b/test/kafka_test.go index 67ea98f5b0..750388175d 100644 --- a/test/kafka_test.go +++ b/test/kafka_test.go @@ -31,13 +31,18 @@ func (s *CLITestSuite) TestKafka() { {args: "kafka cluster create my-failed-cluster --cloud aws --region us-east-1 --availability single-zone --type oops", fixture: "kafka/cluster/create-type-error.golden", exitCode: 1}, {args: "kafka cluster create my-failed-cluster --cloud aws --region us-east-1 --availability single-zone --type dedicated --cku 0", fixture: "kafka/cluster/create-cku-error.golden", exitCode: 1}, {args: "kafka cluster create my-dedicated-cluster --cloud aws --region us-east-1 --type dedicated --cku 1", fixture: "kafka/22.golden"}, + {args: "kafka cluster create my-dedicated-cluster --cloud aws --region us-east-1 --type dedicated --max-ecku 5", fixture: "kafka/cluster/create-dedicated-max-ecku-error.golden", exitCode: 1}, {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --availability single-zone -o json", fixture: "kafka/23.golden"}, {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --availability single-zone -o yaml", fixture: "kafka/24.golden"}, + {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --availability single-zone --max-ecku 2", fixture: "kafka/create-basic-max-ecku.golden"}, + {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --availability single-zone --max-ecku abc", fixture: "kafka/create-basic-max-ecku-error.golden", exitCode: 1}, {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --availability oops-zone", fixture: "kafka/cluster/create-availability-zone-error.golden", exitCode: 1}, {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --type enterprise --availability multi-zone", fixture: "kafka/cluster/create-enterprise.golden"}, + {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --type enterprise --availability multi-zone --max-ecku 4", fixture: "kafka/cluster/create-enterprise-max-ecku.golden"}, {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --type enterprise", fixture: "kafka/cluster/create-enterprise-availability-zone-error.golden", exitCode: 1}, {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --type freight --availability multi-zone", fixture: "kafka/cluster/create-freight.golden"}, {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --type freight", fixture: "kafka/cluster/create-freight-low.golden"}, + {args: "kafka cluster create my-new-cluster --cloud aws --region us-east-1 --type freight --max-ecku 3", fixture: "kafka/cluster/create-freight-max-ecku.golden"}, {args: "kafka cluster create my-basic-cluster-with-ecku-limits --cloud aws --region us-west-2 --type basic", fixture: "kafka/cluster/create-basic-with-ecku-limits.golden"}, {args: "kafka cluster create my-standard-cluster-with-ecku-limits --cloud aws --region us-west-2 --type standard", fixture: "kafka/cluster/create-standard-with-ecku-limits.golden"}, @@ -48,14 +53,27 @@ func (s *CLITestSuite) TestKafka() { {args: "kafka cluster update lkc-with-ecku-limits --name lkc-update-name", fixture: "kafka/cluster/update-basic-with-ecku-limits.golden"}, {args: "kafka cluster update lkc-update-dedicated-expand --name lkc-update-dedicated-name --cku 2", fixture: "kafka/27.golden"}, {args: "kafka cluster update lkc-update-dedicated-expand --cku 2", fixture: "kafka/39.golden"}, + {args: "kafka cluster 
update lkc-update-dedicated-expand --cku 2 --max-ecku 3", fixture: "kafka/update-dedicated-max-ecku-error.golden", exitCode: 1}, {args: "kafka cluster update lkc-update --cku 2", fixture: "kafka/cluster/update-resize-error.golden", exitCode: 1}, {args: "kafka cluster update lkc-update-dedicated-shrink --name lkc-update-dedicated-name --cku 1", fixture: "kafka/44.golden"}, {args: "kafka cluster update lkc-update-dedicated-shrink --cku 1", fixture: "kafka/45.golden"}, {args: "kafka cluster update lkc-update-dedicated-shrink-multi --cku 1", fixture: "kafka/cluster/update-dedicated-shrink-error.golden", exitCode: 1}, {args: "kafka cluster update lkc-update --cku 1", fixture: "kafka/cluster/update-resize-error.golden", exitCode: 1}, + {args: "kafka cluster update lkc-update --max-ecku 4", fixture: "kafka/cluster/update-max-ecku.golden"}, + {args: "kafka cluster update lkc-update-standard --max-ecku 2", fixture: "kafka/cluster/update-standard-max-ecku.golden"}, + {args: "kafka cluster update lkc-update-enterprise --max-ecku 5", fixture: "kafka/cluster/update-enterprise-max-ecku.golden"}, + {args: "kafka cluster update lkc-update-enterprise --max-ecku 15", fixture: "kafka/cluster/update-enterprise-max-ecku-fail.golden", exitCode: 1}, + {args: "kafka cluster update lkc-update-enterprise --max-ecku 5 -o json", fixture: "kafka/cluster/update-enterprise-max-ecku-json.golden"}, + {args: "kafka cluster update lkc-update-enterprise --max-ecku 5 -o yaml", fixture: "kafka/cluster/update-enterprise-max-ecku-yaml.golden"}, + {args: "kafka cluster update lkc-update-enterprise --name enterprise-cluster --max-ecku 7", fixture: "kafka/cluster/update-enterprise-max-ecku-with-name.golden"}, + {args: "kafka cluster update lkc-update-freight --max-ecku 50", fixture: "kafka/cluster/update-freight-max-ecku.golden"}, + {args: "kafka cluster update lkc-update-freight --max-ecku 200", fixture: "kafka/cluster/update-freight-max-ecku-fail.golden", exitCode: 1}, + {args: "kafka cluster update lkc-update-freight --max-ecku 50 -o json", fixture: "kafka/cluster/update-freight-max-ecku-json.golden"}, + {args: "kafka cluster update lkc-update-freight --name freight-cluster --max-ecku 75", fixture: "kafka/cluster/update-freight-max-ecku-with-name.golden"}, // Type upgrade tests {args: "kafka cluster update lkc-update --type standard", fixture: "kafka/cluster/update-type-success.golden"}, - {args: "kafka cluster update lkc-update --type Standard", fixture: "kafka/cluster/update-type-success.golden"}, + {args: "kafka cluster update lkc-update --type standard --max-ecku 6", fixture: "kafka/cluster/update-type-max-ecku-success.golden"}, + {args: "kafka cluster update lkc-update --type standard --max-ecku 35", fixture: "kafka/cluster/update-type-max-ecku-fail.golden", exitCode: 1}, {args: "kafka cluster update lkc-update --type", fixture: "kafka/cluster/update-type-empty-error.golden", exitCode: 1}, {args: "kafka cluster update lkc-update --type basic", fixture: "kafka/cluster/update-type-invalid-error.golden", exitCode: 1}, diff --git a/test/test-server/cmk_handlers.go b/test/test-server/cmk_handlers.go index 5d6313be05..aa6f86d765 100644 --- a/test/test-server/cmk_handlers.go +++ b/test/test-server/cmk_handlers.go @@ -60,18 +60,32 @@ func handleCmkKafkaClusterCreate(t *testing.T) http.HandlerFunc { require.NoError(t, err) return } - cluster.Spec.Config.CmkV2Enterprise = &cmkv2.CmkV2Enterprise{Kind: "Enterprise", MaxEcku: getMaxEcku("", "Enterprise")} + cluster.Spec.Config.CmkV2Enterprise = &cmkv2.CmkV2Enterprise{Kind: "Enterprise"} 
+ if req.Spec.Config.CmkV2Enterprise.MaxEcku != nil { + // cluster.Spec.Config.CmkV2Enterprise.MaxEcku = getMaxEcku("", "Enterprise") + cluster.Spec.Config.CmkV2Enterprise.MaxEcku = req.Spec.Config.CmkV2Enterprise.MaxEcku + } } else if req.Spec.Config.CmkV2Freight != nil { if req.Spec.GetAvailability() == "SINGLE_ZONE" { err := writeError(w, "Durability must be HIGH for an Freight cluster") require.NoError(t, err) return } - cluster.Spec.Config.CmkV2Freight = &cmkv2.CmkV2Freight{Kind: "Freight", MaxEcku: getMaxEcku("", "Freight")} + cluster.Spec.Config.CmkV2Freight = &cmkv2.CmkV2Freight{Kind: "Freight"} + if req.Spec.Config.CmkV2Freight.MaxEcku != nil { + // cluster.Spec.Config.CmkV2Freight.MaxEcku = getMaxEcku("", "Freight") + cluster.Spec.Config.CmkV2Freight.MaxEcku = req.Spec.Config.CmkV2Freight.MaxEcku + } + } else if req.Spec.Config.CmkV2Basic != nil { + cluster.Spec.Config.CmkV2Basic = &cmkv2.CmkV2Basic{Kind: "Basic"} + if req.Spec.Config.CmkV2Basic.MaxEcku != nil { + cluster.Spec.Config.CmkV2Basic.MaxEcku = req.Spec.Config.CmkV2Basic.MaxEcku + } } else if req.Spec.Config.CmkV2Standard != nil { - cluster.Spec.Config.CmkV2Standard = &cmkv2.CmkV2Standard{Kind: "Standard", MaxEcku: getMaxEcku(cluster.GetId(), "Standard")} - } else { - cluster.Spec.Config.CmkV2Basic = &cmkv2.CmkV2Basic{Kind: "Basic", MaxEcku: getMaxEcku(cluster.GetId(), "Basic")} + cluster.Spec.Config.CmkV2Standard = &cmkv2.CmkV2Standard{Kind: "Standard"} + if req.Spec.Config.CmkV2Standard.MaxEcku != nil { + cluster.Spec.Config.CmkV2Standard.MaxEcku = req.Spec.Config.CmkV2Standard.MaxEcku + } } if req.Spec.GetCloud() == "oops" { @@ -153,6 +167,12 @@ func handleCmkCluster(t *testing.T) http.HandlerFunc { handleCmkKafkaClusterDescribeInfinite(t)(w, r) case "lkc-update", "lkc-with-ecku-limits": handleCmkKafkaClusterUpdateRequest(t)(w, r) + case "lkc-update-standard": + handleCmkKafkaStandardClusterUpdateRequest(t)(w, r) + case "lkc-update-enterprise": + handleCmkKafkaEnterpriseClusterUpdateRequest(t)(w, r) + case "lkc-update-freight": + handleCmkKafkaFreightClusterUpdateRequest(t)(w, r) case "lkc-update-dedicated-expand": handleCmkKafkaDedicatedClusterExpansion(t)(w, r) case "lkc-update-dedicated-shrink": @@ -276,6 +296,30 @@ func handleCmkKafkaClusterUpdateRequest(t *testing.T) http.HandlerFunc { Kind: "Standard", }, } + + if req.Spec.Config.CmkV2Standard.MaxEcku == nil { + cluster.Spec.Config.CmkV2Standard.SetMaxEcku(10) + } else if *req.Spec.Config.CmkV2Standard.MaxEcku > 10 { + err = writeError(w, "failed to update Kafka cluster: The specified Max eCKU exceeds the maximum allowed limit of 10 eCKUs for STANDARD SKU") + require.NoError(t, err) + return + } else { + cluster.Spec.Config.CmkV2Standard.SetMaxEcku(*req.Spec.Config.CmkV2Standard.MaxEcku) + } + + err = json.NewEncoder(w).Encode(cluster) + require.NoError(t, err) + return + } + + if req.Spec.Config != nil && req.Spec.Config.CmkV2Basic != nil && req.Spec.Config.CmkV2Basic.MaxEcku != nil { + cluster := getCmkBasicDescribeCluster(req.GetId(), req.Spec.GetDisplayName()) + cluster.Spec.Config = &cmkv2.CmkV2ClusterSpecConfigOneOf{ + CmkV2Basic: &cmkv2.CmkV2Basic{ + Kind: "Basic", + MaxEcku: req.Spec.Config.CmkV2Basic.MaxEcku, + }, + } err := json.NewEncoder(w).Encode(cluster) require.NoError(t, err) return @@ -291,6 +335,118 @@ func handleCmkKafkaClusterUpdateRequest(t *testing.T) http.HandlerFunc { } } +// Handler for GET/PUT "/cmk/v2/clusters/lkc-update-standard" +func handleCmkKafkaStandardClusterUpdateRequest(t *testing.T) http.HandlerFunc { + return func(w 
http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + cluster := getCmkStandardDescribeCluster("lkc-update-standard", "lkc-update-standard") + cluster.Status = &cmkv2.CmkV2ClusterStatus{Phase: "PROVISIONED"} + err := json.NewEncoder(w).Encode(cluster) + require.NoError(t, err) + case http.MethodPatch: + var req cmkv2.CmkV2Cluster + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + req.Id = cmkv2.PtrString("lkc-update-standard") + + if req.Spec.Config != nil && req.Spec.Config.CmkV2Standard != nil && req.Spec.Config.CmkV2Standard.MaxEcku != nil { + cluster := getCmkStandardDescribeCluster(req.GetId(), req.Spec.GetDisplayName()) + cluster.Spec.Config = &cmkv2.CmkV2ClusterSpecConfigOneOf{ + CmkV2Standard: &cmkv2.CmkV2Standard{ + Kind: "Standard", + MaxEcku: req.Spec.Config.CmkV2Standard.MaxEcku, + }, + } + err := json.NewEncoder(w).Encode(cluster) + + if req.Spec.Config.CmkV2Standard.MaxEcku != nil && *req.Spec.Config.CmkV2Standard.MaxEcku > 10 { + err = writeError(w, "failed to update Kafka cluster: The specified Max eCKU exceeds the maximum allowed limit of 10 eCKUs for STANDARD SKU") + require.NoError(t, err) + return + } + + require.NoError(t, err) + return + } + } + } +} + +// Handler for GET/PATCH "/cmk/v2/clusters/lkc-update-enterprise" +func handleCmkKafkaEnterpriseClusterUpdateRequest(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + cluster := getCmkEnterpriseDescribeCluster("lkc-update-enterprise", "lkc-update-enterprise") + cluster.Status = &cmkv2.CmkV2ClusterStatus{Phase: "PROVISIONED"} + err := json.NewEncoder(w).Encode(cluster) + require.NoError(t, err) + case http.MethodPatch: + var req cmkv2.CmkV2Cluster + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + req.Id = cmkv2.PtrString("lkc-update-enterprise") + + if req.Spec.Config != nil && req.Spec.Config.CmkV2Enterprise != nil && req.Spec.Config.CmkV2Enterprise.MaxEcku != nil { + if req.Spec.Config.CmkV2Enterprise.MaxEcku != nil && *req.Spec.Config.CmkV2Enterprise.MaxEcku > 10 { + err = writeError(w, "failed to update Kafka cluster: The specified Max eCKU exceeds the maximum allowed limit of 10 eCKUs for ENTERPRISE SKU") + require.NoError(t, err) + return + } + + cluster := getCmkEnterpriseDescribeCluster(req.GetId(), req.Spec.GetDisplayName()) + cluster.Spec.Config = &cmkv2.CmkV2ClusterSpecConfigOneOf{ + CmkV2Enterprise: &cmkv2.CmkV2Enterprise{ + Kind: "Enterprise", + MaxEcku: req.Spec.Config.CmkV2Enterprise.MaxEcku, + }, + } + err = json.NewEncoder(w).Encode(cluster) + require.NoError(t, err) + return + } + } + } +} + +// Handler for GET/PATCH "/cmk/v2/clusters/lkc-update-freight" +func handleCmkKafkaFreightClusterUpdateRequest(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + cluster := getCmkFreightDescribeCluster("lkc-update-freight", "lkc-update-freight") + cluster.Status = &cmkv2.CmkV2ClusterStatus{Phase: "PROVISIONED"} + err := json.NewEncoder(w).Encode(cluster) + require.NoError(t, err) + case http.MethodPatch: + var req cmkv2.CmkV2Cluster + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + req.Id = cmkv2.PtrString("lkc-update-freight") + + if req.Spec.Config != nil && req.Spec.Config.CmkV2Freight != nil && req.Spec.Config.CmkV2Freight.MaxEcku != nil { + if req.Spec.Config.CmkV2Freight.MaxEcku != nil && *req.Spec.Config.CmkV2Freight.MaxEcku > 150 { + err = 
writeError(w, "failed to update Kafka cluster: The specified Max eCKU exceeds the maximum allowed limit of 152 eCKUs for FREIGHT SKU") + require.NoError(t, err) + return + } + + cluster := getCmkFreightDescribeCluster(req.GetId(), req.Spec.GetDisplayName()) + cluster.Spec.Config = &cmkv2.CmkV2ClusterSpecConfigOneOf{ + CmkV2Freight: &cmkv2.CmkV2Freight{ + Kind: "Freight", + MaxEcku: req.Spec.Config.CmkV2Freight.MaxEcku, + }, + } + err = json.NewEncoder(w).Encode(cluster) + require.NoError(t, err) + return + } + } + } +} + // Handler for GET/PUT "/cmk/v2/clusters/lkc-update-dedicated-expand" func handleCmkKafkaDedicatedClusterExpansion(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { diff --git a/test/test-server/utils.go b/test/test-server/utils.go index 38b0a936c6..c2b1c10bd6 100644 --- a/test/test-server/utils.go +++ b/test/test-server/utils.go @@ -428,6 +428,132 @@ func getCmkDedicatedDescribeCluster(id, name string, cku int32) *cmkv2.CmkV2Clus } } +func getCmkStandardDescribeCluster(id, name string) *cmkv2.CmkV2Cluster { + return &cmkv2.CmkV2Cluster{ + Spec: &cmkv2.CmkV2ClusterSpec{ + DisplayName: cmkv2.PtrString(name), + Cloud: cmkv2.PtrString("aws"), + Region: cmkv2.PtrString("us-west-2"), + Config: &cmkv2.CmkV2ClusterSpecConfigOneOf{ + CmkV2Standard: &cmkv2.CmkV2Standard{Kind: "Standard"}, + }, + KafkaBootstrapEndpoint: cmkv2.PtrString("SASL_SSL://kafka-endpoint"), + HttpEndpoint: cmkv2.PtrString(TestKafkaRestProxyUrl.String()), + Availability: cmkv2.PtrString("SINGLE_ZONE"), + Endpoints: &cmkv2.ModelMap{ + "pni-abc123-standard": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pni-abc123-standard.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pni-abc123-standard.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PNI", + }, + "privatelink-uvw456-standard": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pl-uvw456-standard.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pl-uvw456-standard.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PRIVATELINK", + }, + "privatelink-xyz789-standard": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pl-xyz789-standard.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pl-xyz789-standard.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PRIVATELINK", + }, + "public-0001-standard": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://public-0001-standard.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://public-0001-standard.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PUBLIC", + }, + }, + }, + Id: cmkv2.PtrString(id), + Status: &cmkv2.CmkV2ClusterStatus{ + Phase: "PROVISIONED", + }, + } +} + +func getCmkEnterpriseDescribeCluster(id, name string) *cmkv2.CmkV2Cluster { + return &cmkv2.CmkV2Cluster{ + Spec: &cmkv2.CmkV2ClusterSpec{ + DisplayName: cmkv2.PtrString(name), + Cloud: cmkv2.PtrString("aws"), + Region: cmkv2.PtrString("us-west-2"), + Config: &cmkv2.CmkV2ClusterSpecConfigOneOf{ + CmkV2Enterprise: &cmkv2.CmkV2Enterprise{Kind: "Enterprise"}, + }, + KafkaBootstrapEndpoint: cmkv2.PtrString("SASL_SSL://kafka-endpoint"), + HttpEndpoint: cmkv2.PtrString(TestKafkaRestProxyUrl.String()), + Availability: cmkv2.PtrString("MULTI_ZONE"), + Endpoints: &cmkv2.ModelMap{ + "pni-abc123-enterprise": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pni-abc123-enterprise.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: 
"https://pni-abc123-enterprise.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PNI", + }, + "privatelink-uvw456-enterprise": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pl-uvw456-enterprise.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pl-uvw456-enterprise.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PRIVATELINK", + }, + "privatelink-xyz789-enterprise": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pl-xyz789-enterprise.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pl-xyz789-enterprise.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PRIVATELINK", + }, + "public-0001-enterprise": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://public-0001-enterprise.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://public-0001-enterprise.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PUBLIC", + }, + }, + }, + Id: cmkv2.PtrString(id), + Status: &cmkv2.CmkV2ClusterStatus{ + Phase: "PROVISIONED", + }, + } +} + +func getCmkFreightDescribeCluster(id, name string) *cmkv2.CmkV2Cluster { + return &cmkv2.CmkV2Cluster{ + Spec: &cmkv2.CmkV2ClusterSpec{ + DisplayName: cmkv2.PtrString(name), + Cloud: cmkv2.PtrString("aws"), + Region: cmkv2.PtrString("us-west-2"), + Config: &cmkv2.CmkV2ClusterSpecConfigOneOf{ + CmkV2Freight: &cmkv2.CmkV2Freight{Kind: "Freight"}, + }, + KafkaBootstrapEndpoint: cmkv2.PtrString("SASL_SSL://kafka-endpoint"), + HttpEndpoint: cmkv2.PtrString(TestKafkaRestProxyUrl.String()), + Availability: cmkv2.PtrString("MULTI_ZONE"), + Endpoints: &cmkv2.ModelMap{ + "pni-abc123-freight": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pni-abc123-freight.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pni-abc123-freight.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PNI", + }, + "privatelink-uvw456-freight": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pl-uvw456-freight.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pl-uvw456-freight.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PRIVATELINK", + }, + "privatelink-xyz789-freight": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://pl-xyz789-freight.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://pl-xyz789-freight.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PRIVATELINK", + }, + "public-0001-freight": cmkv2.CmkV2Endpoints{ + KafkaBootstrapEndpoint: "SASL_SSL://public-0001-freight.kafka.us-west-2.aws.confluent.cloud:9092", + HttpEndpoint: "https://public-0001-freight.rest.us-west-2.aws.confluent.cloud", + ConnectionType: "PUBLIC", + }, + }, + }, + Id: cmkv2.PtrString(id), + Status: &cmkv2.CmkV2ClusterStatus{ + Phase: "PROVISIONED", + }, + } +} + func getCmkUnknownDescribeCluster(id, name string) *cmkv2.CmkV2Cluster { return &cmkv2.CmkV2Cluster{ Spec: &cmkv2.CmkV2ClusterSpec{