From 557d3c62e722c1dcd4e6d11ace6f75a717181f2c Mon Sep 17 00:00:00 2001 From: Jordan Levin Date: Thu, 13 Jun 2024 17:13:41 -0700 Subject: [PATCH] kafkatopics (#17) Signed-off-by: Jordan Levin --- .gitmodules | 2 +- Makefile | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 383 ++++++++++++ .../v1alpha1/zz_generated.managed.go | 60 ++ .../v1alpha1/zz_generated.managedlist.go | 9 + .../v1alpha1/zz_generated.resolvers.go | 29 + .../v1alpha1/zz_generated_terraformed.go | 84 +++ .../confluent/v1alpha1/zz_kafkatopic_types.go | 212 +++++++ config/confluent_kafka_topic/config.go | 20 + config/external_name.go | 1 + config/generated.lst | 2 +- config/provider.go | 3 + examples-generated/confluent/kafkatopic.yaml | 25 + .../confluent/kafkatopic/zz_controller.go | 66 ++ internal/controller/zz_setup.go | 2 + .../confluent.crossplane.io_kafkatopics.yaml | 568 ++++++++++++++++++ 16 files changed, 1465 insertions(+), 3 deletions(-) create mode 100755 apis/confluent/v1alpha1/zz_kafkatopic_types.go create mode 100644 config/confluent_kafka_topic/config.go create mode 100644 examples-generated/confluent/kafkatopic.yaml create mode 100755 internal/controller/confluent/kafkatopic/zz_controller.go create mode 100644 package/crds/confluent.crossplane.io_kafkatopics.yaml diff --git a/.gitmodules b/.gitmodules index c2fad47..8f84209 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "build"] path = build - url = https://github.com/upbound/build + url = https://github.com/crossplane/build diff --git a/Makefile b/Makefile index d77e777..b9afdb2 100644 --- a/Makefile +++ b/Makefile @@ -52,7 +52,7 @@ GO111MODULE ?= on # Setup Kubernetes tools KIND_VERSION = v0.16.0 -UP_VERSION = v0.18.0 +UP_VERSION = v0.31.0 UP_CHANNEL = stable UPTEST_VERSION = v0.5.0 -include build/makelib/k8s_tools.mk diff --git a/apis/confluent/v1alpha1/zz_generated.deepcopy.go b/apis/confluent/v1alpha1/zz_generated.deepcopy.go index 5817f42..27397a1 100644 --- a/apis/confluent/v1alpha1/zz_generated.deepcopy.go +++ b/apis/confluent/v1alpha1/zz_generated.deepcopy.go @@ -1895,6 +1895,389 @@ func (in *KafkaClusterParameters) DeepCopy() *KafkaClusterParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopic) DeepCopyInto(out *KafkaTopic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopic. +func (in *KafkaTopic) DeepCopy() *KafkaTopic { + if in == nil { + return nil + } + out := new(KafkaTopic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaTopic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicCredentialsInitParameters) DeepCopyInto(out *KafkaTopicCredentialsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicCredentialsInitParameters. 
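+// KafkaTopicCredentialsInitParameters has no fields to copy: the API key and
+// secret are kept out of init parameters because they are sensitive values
+// injected from Kubernetes Secrets (see GetConnectionDetailsMapping later in
+// this patch), so the DeepCopyInto above reduces to a struct assignment.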
+func (in *KafkaTopicCredentialsInitParameters) DeepCopy() *KafkaTopicCredentialsInitParameters { + if in == nil { + return nil + } + out := new(KafkaTopicCredentialsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicCredentialsObservation) DeepCopyInto(out *KafkaTopicCredentialsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicCredentialsObservation. +func (in *KafkaTopicCredentialsObservation) DeepCopy() *KafkaTopicCredentialsObservation { + if in == nil { + return nil + } + out := new(KafkaTopicCredentialsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicCredentialsParameters) DeepCopyInto(out *KafkaTopicCredentialsParameters) { + *out = *in + out.KeySecretRef = in.KeySecretRef + out.SecretSecretRef = in.SecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicCredentialsParameters. +func (in *KafkaTopicCredentialsParameters) DeepCopy() *KafkaTopicCredentialsParameters { + if in == nil { + return nil + } + out := new(KafkaTopicCredentialsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicInitParameters) DeepCopyInto(out *KafkaTopicInitParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]KafkaTopicCredentialsInitParameters, len(*in)) + copy(*out, *in) + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = make([]KafkaTopicKafkaClusterInitParameters, len(*in)) + copy(*out, *in) + } + if in.PartitionsCount != nil { + in, out := &in.PartitionsCount, &out.PartitionsCount + *out = new(float64) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicInitParameters. +func (in *KafkaTopicInitParameters) DeepCopy() *KafkaTopicInitParameters { + if in == nil { + return nil + } + out := new(KafkaTopicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicKafkaClusterInitParameters) DeepCopyInto(out *KafkaTopicKafkaClusterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicKafkaClusterInitParameters. 
+func (in *KafkaTopicKafkaClusterInitParameters) DeepCopy() *KafkaTopicKafkaClusterInitParameters { + if in == nil { + return nil + } + out := new(KafkaTopicKafkaClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicKafkaClusterObservation) DeepCopyInto(out *KafkaTopicKafkaClusterObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicKafkaClusterObservation. +func (in *KafkaTopicKafkaClusterObservation) DeepCopy() *KafkaTopicKafkaClusterObservation { + if in == nil { + return nil + } + out := new(KafkaTopicKafkaClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicKafkaClusterParameters) DeepCopyInto(out *KafkaTopicKafkaClusterParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicKafkaClusterParameters. +func (in *KafkaTopicKafkaClusterParameters) DeepCopy() *KafkaTopicKafkaClusterParameters { + if in == nil { + return nil + } + out := new(KafkaTopicKafkaClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicList) DeepCopyInto(out *KafkaTopicList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaTopic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicList. +func (in *KafkaTopicList) DeepCopy() *KafkaTopicList { + if in == nil { + return nil + } + out := new(KafkaTopicList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaTopicList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
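+// The generated body below deep-copies Config by allocating a new map and a
+// new *string per entry, so mutating a copy can never alias the original.
+// The same pattern, as a minimal standalone sketch:
+//
+//	dst := make(map[string]*string, len(src))
+//	for k, v := range src {
+//		if v == nil {
+//			dst[k] = nil
+//			continue
+//		}
+//		s := *v     // copy the pointed-to string
+//		dst[k] = &s // point at the fresh copy
+//	}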
+func (in *KafkaTopicObservation) DeepCopyInto(out *KafkaTopicObservation) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]KafkaTopicCredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = make([]KafkaTopicKafkaClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionsCount != nil { + in, out := &in.PartitionsCount, &out.PartitionsCount + *out = new(float64) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicObservation. +func (in *KafkaTopicObservation) DeepCopy() *KafkaTopicObservation { + if in == nil { + return nil + } + out := new(KafkaTopicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicParameters) DeepCopyInto(out *KafkaTopicParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]KafkaTopicCredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = make([]KafkaTopicKafkaClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionsCount != nil { + in, out := &in.PartitionsCount, &out.PartitionsCount + *out = new(float64) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicParameters. +func (in *KafkaTopicParameters) DeepCopy() *KafkaTopicParameters { + if in == nil { + return nil + } + out := new(KafkaTopicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicSpec) DeepCopyInto(out *KafkaTopicSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicSpec. 
+func (in *KafkaTopicSpec) DeepCopy() *KafkaTopicSpec { + if in == nil { + return nil + } + out := new(KafkaTopicSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTopicStatus) DeepCopyInto(out *KafkaTopicStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTopicStatus. +func (in *KafkaTopicStatus) DeepCopy() *KafkaTopicStatus { + if in == nil { + return nil + } + out := new(KafkaTopicStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedResourceInitParameters) DeepCopyInto(out *ManagedResourceInitParameters) { *out = *in diff --git a/apis/confluent/v1alpha1/zz_generated.managed.go b/apis/confluent/v1alpha1/zz_generated.managed.go index ad5180d..974bb54 100644 --- a/apis/confluent/v1alpha1/zz_generated.managed.go +++ b/apis/confluent/v1alpha1/zz_generated.managed.go @@ -307,6 +307,66 @@ func (mg *KafkaACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this KafkaTopic. +func (mg *KafkaTopic) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this KafkaTopic. +func (mg *KafkaTopic) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this KafkaTopic. +func (mg *KafkaTopic) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this KafkaTopic. +func (mg *KafkaTopic) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this KafkaTopic. +func (mg *KafkaTopic) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this KafkaTopic. +func (mg *KafkaTopic) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this KafkaTopic. +func (mg *KafkaTopic) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this KafkaTopic. +func (mg *KafkaTopic) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this KafkaTopic. +func (mg *KafkaTopic) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this KafkaTopic. +func (mg *KafkaTopic) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this KafkaTopic. +func (mg *KafkaTopic) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this KafkaTopic. +func (mg *KafkaTopic) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this RoleBinding. 
func (mg *RoleBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) diff --git a/apis/confluent/v1alpha1/zz_generated.managedlist.go b/apis/confluent/v1alpha1/zz_generated.managedlist.go index 302bc4c..3c51f84 100644 --- a/apis/confluent/v1alpha1/zz_generated.managedlist.go +++ b/apis/confluent/v1alpha1/zz_generated.managedlist.go @@ -52,6 +52,15 @@ func (l *KafkaACLList) GetItems() []resource.Managed { return items } +// GetItems of this KafkaTopicList. +func (l *KafkaTopicList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this RoleBindingList. func (l *RoleBindingList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/confluent/v1alpha1/zz_generated.resolvers.go b/apis/confluent/v1alpha1/zz_generated.resolvers.go index 32cfcae..27e842a 100644 --- a/apis/confluent/v1alpha1/zz_generated.resolvers.go +++ b/apis/confluent/v1alpha1/zz_generated.resolvers.go @@ -152,3 +152,32 @@ func (mg *KafkaACL) ResolveReferences(ctx context.Context, c client.Reader) erro return nil } + +// ResolveReferences of this KafkaTopic. +func (mg *KafkaTopic) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.KafkaCluster); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KafkaCluster[i3].ID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KafkaCluster[i3].IDRef, + Selector: mg.Spec.ForProvider.KafkaCluster[i3].IDSelector, + To: reference.To{ + List: &ClusterList{}, + Managed: &Cluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KafkaCluster[i3].ID") + } + mg.Spec.ForProvider.KafkaCluster[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KafkaCluster[i3].IDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/confluent/v1alpha1/zz_generated_terraformed.go b/apis/confluent/v1alpha1/zz_generated_terraformed.go index 825e732..df20b7c 100755 --- a/apis/confluent/v1alpha1/zz_generated_terraformed.go +++ b/apis/confluent/v1alpha1/zz_generated_terraformed.go @@ -438,6 +438,90 @@ func (tr *ClusterConfig) GetTerraformSchemaVersion() int { return 0 } +// GetTerraformResourceType returns Terraform resource type for this KafkaTopic +func (mg *KafkaTopic) GetTerraformResourceType() string { + return "confluent_kafka_topic" +} + +// GetConnectionDetailsMapping for this KafkaTopic +func (tr *KafkaTopic) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"credentials[*].key": "spec.forProvider.credentials[*].keySecretRef", "credentials[*].secret": "spec.forProvider.credentials[*].secretSecretRef"} +} + +// GetObservation of this KafkaTopic +func (tr *KafkaTopic) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this KafkaTopic +func (tr *KafkaTopic) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of 
this KafkaTopic +func (tr *KafkaTopic) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this KafkaTopic +func (tr *KafkaTopic) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this KafkaTopic +func (tr *KafkaTopic) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this KafkaTopic +func (tr *KafkaTopic) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this KafkaTopic using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *KafkaTopic) LateInitialize(attrs []byte) (bool, error) { + params := &KafkaTopicParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *KafkaTopic) GetTerraformSchemaVersion() int { + return 2 +} + // GetTerraformResourceType returns Terraform resource type for this RoleBinding func (mg *RoleBinding) GetTerraformResourceType() string { return "confluent_role_binding" diff --git a/apis/confluent/v1alpha1/zz_kafkatopic_types.go b/apis/confluent/v1alpha1/zz_kafkatopic_types.go new file mode 100755 index 0000000..c6d392d --- /dev/null +++ b/apis/confluent/v1alpha1/zz_kafkatopic_types.go @@ -0,0 +1,212 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type KafkaTopicCredentialsInitParameters struct { +} + +type KafkaTopicCredentialsObservation struct { +} + +type KafkaTopicCredentialsParameters struct { + + // The Kafka API Key. + // The Cluster API Key for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + KeySecretRef v1.SecretKeySelector `json:"keySecretRef" tf:"-"` + + // The Kafka API Secret. + // The Cluster API Secret for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + SecretSecretRef v1.SecretKeySelector `json:"secretSecretRef" tf:"-"` +} + +type KafkaTopicInitParameters struct { + + // The custom topic settings to set: + // The custom topic settings to set (e.g., `"cleanup.policy" = "compact"`). + Config map[string]*string `json:"config,omitempty" tf:"config,omitempty"` + + // supports the following: + // The Cluster API Credentials. 
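+	// Note that the key and secret values themselves never appear in spec or
+	// status: GetConnectionDetailsMapping (above) wires credentials[*].key and
+	// credentials[*].secret to keySecretRef/secretSecretRef, so both are read
+	// from Kubernetes Secrets at reconcile time.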
+ Credentials []KafkaTopicCredentialsInitParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // supports the following: + KafkaCluster []KafkaTopicKafkaClusterInitParameters `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // The number of partitions to create in the topic. Defaults to 6. + // The number of partitions to create in the topic. Defaults to `6`. + PartitionsCount *float64 `json:"partitionsCount,omitempty" tf:"partitions_count,omitempty"` + + // The REST endpoint of the Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443). + // The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` + + // The name of the topic, for example, orders-1. The topic name can be up to 249 characters in length, and can include the following characters: a-z, A-Z, 0-9, . (dot), _ (underscore), and - (dash). As a best practice, we recommend against using any personally identifiable information (PII) when naming your topic. + // The name of the topic, for example, `orders-1`. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` +} + +type KafkaTopicKafkaClusterInitParameters struct { +} + +type KafkaTopicKafkaClusterObservation struct { + + // The ID of the Kafka cluster, for example, lkc-abc123. + // The Kafka cluster ID (e.g., `lkc-12345`). + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type KafkaTopicKafkaClusterParameters struct { + + // The ID of the Kafka cluster, for example, lkc-abc123. + // The Kafka cluster ID (e.g., `lkc-12345`). + // +crossplane:generate:reference:type=Cluster + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a Cluster to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a Cluster to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type KafkaTopicObservation struct { + + // The custom topic settings to set: + // The custom topic settings to set (e.g., `"cleanup.policy" = "compact"`). + Config map[string]*string `json:"config,omitempty" tf:"config,omitempty"` + + // supports the following: + // The Cluster API Credentials. + Credentials []KafkaTopicCredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The ID of the Kafka cluster, for example, lkc-abc123. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // supports the following: + KafkaCluster []KafkaTopicKafkaClusterObservation `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // The number of partitions to create in the topic. Defaults to 6. + // The number of partitions to create in the topic. Defaults to `6`. + PartitionsCount *float64 `json:"partitionsCount,omitempty" tf:"partitions_count,omitempty"` + + // The REST endpoint of the Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443). + // The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` + + // The name of the topic, for example, orders-1. The topic name can be up to 249 characters in length, and can include the following characters: a-z, A-Z, 0-9, . (dot), _ (underscore), and - (dash). 
As a best practice, we recommend against using any personally identifiable information (PII) when naming your topic. + // The name of the topic, for example, `orders-1`. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` +} + +type KafkaTopicParameters struct { + + // The custom topic settings to set: + // The custom topic settings to set (e.g., `"cleanup.policy" = "compact"`). + // +kubebuilder:validation:Optional + Config map[string]*string `json:"config,omitempty" tf:"config,omitempty"` + + // supports the following: + // The Cluster API Credentials. + // +kubebuilder:validation:Optional + Credentials []KafkaTopicCredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // supports the following: + // +kubebuilder:validation:Optional + KafkaCluster []KafkaTopicKafkaClusterParameters `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // The number of partitions to create in the topic. Defaults to 6. + // The number of partitions to create in the topic. Defaults to `6`. + // +kubebuilder:validation:Optional + PartitionsCount *float64 `json:"partitionsCount,omitempty" tf:"partitions_count,omitempty"` + + // The REST endpoint of the Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443). + // The REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + // +kubebuilder:validation:Optional + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` + + // The name of the topic, for example, orders-1. The topic name can be up to 249 characters in length, and can include the following characters: a-z, A-Z, 0-9, . (dot), _ (underscore), and - (dash). As a best practice, we recommend against using any personally identifiable information (PII) when naming your topic. + // The name of the topic, for example, `orders-1`. + // +kubebuilder:validation:Optional + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` +} + +// KafkaTopicSpec defines the desired state of KafkaTopic +type KafkaTopicSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider KafkaTopicParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider KafkaTopicInitParameters `json:"initProvider,omitempty"` +} + +// KafkaTopicStatus defines the observed state of KafkaTopic. +type KafkaTopicStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider KafkaTopicObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// KafkaTopic is the Schema for the KafkaTopics API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent}
+type KafkaTopic struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.topicName) || (has(self.initProvider) && has(self.initProvider.topicName))",message="spec.forProvider.topicName is a required parameter"
+	Spec   KafkaTopicSpec   `json:"spec"`
+	Status KafkaTopicStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// KafkaTopicList contains a list of KafkaTopics
+type KafkaTopicList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []KafkaTopic `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	KafkaTopic_Kind             = "KafkaTopic"
+	KafkaTopic_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: KafkaTopic_Kind}.String()
+	KafkaTopic_KindAPIVersion   = KafkaTopic_Kind + "." + CRDGroupVersion.String()
+	KafkaTopic_GroupVersionKind = CRDGroupVersion.WithKind(KafkaTopic_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&KafkaTopic{}, &KafkaTopicList{})
+}
diff --git a/config/confluent_kafka_topic/config.go b/config/confluent_kafka_topic/config.go
new file mode 100644
index 0000000..6c96e3e
--- /dev/null
+++ b/config/confluent_kafka_topic/config.go
@@ -0,0 +1,20 @@
+package confluent_kafka_topic
+
+import (
+	"github.com/crossplane/upjet/pkg/config"
+)
+
+// Configure configures individual resources by adding custom ResourceConfigurators.
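+// Registering the "kafka_cluster.id" reference below is what makes the
+// generated KafkaTopicKafkaClusterParameters carry the idRef/idSelector
+// fields and gives KafkaTopic the ResolveReferences implementation in
+// zz_generated.resolvers.go, so a topic can select its Cluster by labels
+// instead of hard-coding an lkc-... ID.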
+func Configure(p *config.Provider) {
+	p.AddResourceConfigurator("confluent_kafka_topic", func(r *config.Resource) {
+		// Override the short API group that upjet would otherwise derive for
+		// this resource so it lands in the common "confluent" group.
+		r.ShortGroup = "confluent"
+		r.UseAsync = true
+		r.Kind = "KafkaTopic"
+
+		// Register a cross-resource reference so the Kafka cluster ID can be
+		// set via spec.forProvider.kafkaCluster[*].id or resolved from a
+		// Cluster managed resource via idRef/idSelector.
+		r.References["kafka_cluster.id"] = config.Reference{
+			Type: "Cluster",
+		}
+	})
+}
diff --git a/config/external_name.go b/config/external_name.go
index afaf9d1..9418198 100644
--- a/config/external_name.go
+++ b/config/external_name.go
@@ -16,6 +16,7 @@ var ExternalNameConfigs = map[string]config.ExternalName{
 	"confluent_service_account":         config.IdentifierFromProvider,
 	"confluent_api_key":                 config.IdentifierFromProvider,
 	"confluent_kafka_acl":               config.IdentifierFromProvider,
+	"confluent_kafka_topic":             config.IdentifierFromProvider,
 	"confluent_role_binding":            config.IdentifierFromProvider,
 	"confluent_schema_registry_cluster": config.IdentifierFromProvider,
 }
diff --git a/config/generated.lst b/config/generated.lst
index 4be3f40..1219ec5 100644
--- a/config/generated.lst
+++ b/config/generated.lst
@@ -1 +1 @@
-["confluent_api_key","confluent_environment","confluent_kafka_acl","confluent_kafka_cluster","confluent_kafka_cluster_config","confluent_role_binding","confluent_schema_registry_cluster","confluent_service_account"]
\ No newline at end of file
+["confluent_api_key","confluent_environment","confluent_kafka_acl","confluent_kafka_cluster","confluent_kafka_cluster_config","confluent_kafka_topic","confluent_role_binding","confluent_schema_registry_cluster","confluent_service_account"]
\ No newline at end of file
diff --git a/config/provider.go b/config/provider.go
index 6499fa1..775dc92 100644
--- a/config/provider.go
+++ b/config/provider.go
@@ -15,7 +15,9 @@ import (
 	confluentkafkaacl "github.com/crossplane-contrib/provider-confluent/config/confluent_kafka_acl"
 	confluentkafkacluster "github.com/crossplane-contrib/provider-confluent/config/confluent_kafka_cluster"
 	confluentkafkaclusterconfig "github.com/crossplane-contrib/provider-confluent/config/confluent_kafka_cluster_config"
+	confluentkafkatopic "github.com/crossplane-contrib/provider-confluent/config/confluent_kafka_topic"
 	confluentrolebinding "github.com/crossplane-contrib/provider-confluent/config/confluent_role_binding"
+	confluentschemaregistrycluster "github.com/crossplane-contrib/provider-confluent/config/confluent_schema_registry_cluster"
 	confluentserviceaccount "github.com/crossplane-contrib/provider-confluent/config/confluent_service_account"
 )
@@ -49,6 +51,7 @@ func GetProvider() *ujconfig.Provider {
 		confluentserviceaccount.Configure,
 		confluentapikey.Configure,
 		confluentkafkaacl.Configure,
+		confluentkafkatopic.Configure,
 		confluentrolebinding.Configure,
 		confluentschemaregistrycluster.Configure,
 	} {
diff --git a/examples-generated/confluent/kafkatopic.yaml b/examples-generated/confluent/kafkatopic.yaml
new file mode 100644
index 0000000..7260be9
--- /dev/null
+++ b/examples-generated/confluent/kafkatopic.yaml
@@ -0,0 +1,25 @@
+apiVersion: confluent.crossplane.io/v1alpha1
+kind: KafkaTopic
+metadata:
+  annotations:
+    meta.upbound.io/example-id: confluent/v1alpha1/kafkatopic
+  labels:
+    testing.upbound.io/example-name: orders
+  name: orders
+spec:
+  forProvider:
+    credentials:
+    - keySecretRef:
+        key: attribute.id
+        name: example-api-key
+        namespace: upbound-system
+      secretSecretRef:
+        key: attribute.secret
+        name: example-api-key
+        namespace: upbound-system
+    kafkaCluster:
+    - 
idSelector: + matchLabels: + testing.upbound.io/example-name: basic-cluster + restEndpoint: ${confluent_kafka_cluster.basic-cluster.rest_endpoint} + topicName: orders diff --git a/internal/controller/confluent/kafkatopic/zz_controller.go b/internal/controller/confluent/kafkatopic/zz_controller.go new file mode 100755 index 0000000..0fd450f --- /dev/null +++ b/internal/controller/confluent/kafkatopic/zz_controller.go @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package kafkatopic + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/confluent/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles KafkaTopic managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.KafkaTopic_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.KafkaTopic_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.KafkaTopic_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_kafka_topic"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.KafkaTopic_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.KafkaTopic{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index 8172604..61c6560 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -14,6 +14,7 @@ import ( clusterconfig "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/clusterconfig" environment "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/environment" kafkaacl "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/kafkaacl" + kafkatopic "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/kafkatopic" rolebinding "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/rolebinding" schemaregistrycluster "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/schemaregistrycluster" serviceaccount "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/serviceaccount" @@ -29,6 +30,7 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { clusterconfig.Setup, environment.Setup, kafkaacl.Setup, + kafkatopic.Setup, rolebinding.Setup, schemaregistrycluster.Setup, serviceaccount.Setup, diff --git a/package/crds/confluent.crossplane.io_kafkatopics.yaml b/package/crds/confluent.crossplane.io_kafkatopics.yaml new file mode 100644 index 0000000..8f1d95e --- /dev/null +++ b/package/crds/confluent.crossplane.io_kafkatopics.yaml @@ -0,0 +1,568 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: kafkatopics.confluent.crossplane.io +spec: + group: confluent.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: KafkaTopic + listKind: KafkaTopicList + plural: kafkatopics + singular: kafkatopic + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: KafkaTopic is the Schema for the KafkaTopics API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KafkaTopicSpec defines the desired state of KafkaTopic + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. 
This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + config: + additionalProperties: + type: string + description: 'The custom topic settings to set: The custom topic + settings to set (e.g., `"cleanup.policy" = "compact"`).' + type: object + credentials: + description: 'supports the following: The Cluster API Credentials.' + items: + properties: + keySecretRef: + description: The Kafka API Key. The Cluster API Key for + your Confluent Cloud cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + secretSecretRef: + description: The Kafka API Secret. The Cluster API Secret + for your Confluent Cloud cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - keySecretRef + - secretSecretRef + type: object + type: array + kafkaCluster: + description: 'supports the following:' + items: + properties: + id: + description: The ID of the Kafka cluster, for example, lkc-abc123. + The Kafka cluster ID (e.g., `lkc-12345`). + type: string + idRef: + description: Reference to a Cluster to populate id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution + of this reference is required. The default is + 'Required', which means the reconcile will fail + if the reference cannot be resolved. 'Optional' + means this reference will be a no-op if it cannot + be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference only + when the corresponding field is not present. Use + 'Always' to resolve the reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Cluster to populate id. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object with + the same controller reference as the selecting object + is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution + of this reference is required. The default is + 'Required', which means the reconcile will fail + if the reference cannot be resolved. 'Optional' + means this reference will be a no-op if it cannot + be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference only + when the corresponding field is not present. Use + 'Always' to resolve the reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + partitionsCount: + description: The number of partitions to create in the topic. + Defaults to 6. The number of partitions to create in the topic. + Defaults to `6`. + type: number + restEndpoint: + description: The REST endpoint of the Kafka cluster, for example, + https://pkc-00000.us-central1.gcp.confluent.cloud:443). The + REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + topicName: + description: 'The name of the topic, for example, orders-1. The + topic name can be up to 249 characters in length, and can include + the following characters: a-z, A-Z, 0-9, . (dot), _ (underscore), + and - (dash). As a best practice, we recommend against using + any personally identifiable information (PII) when naming your + topic. The name of the topic, for example, `orders-1`.' + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + config: + additionalProperties: + type: string + description: 'The custom topic settings to set: The custom topic + settings to set (e.g., `"cleanup.policy" = "compact"`).' + type: object + credentials: + description: 'supports the following: The Cluster API Credentials.' + items: + type: object + type: array + kafkaCluster: + description: 'supports the following:' + items: + type: object + type: array + partitionsCount: + description: The number of partitions to create in the topic. + Defaults to 6. The number of partitions to create in the topic. + Defaults to `6`. + type: number + restEndpoint: + description: The REST endpoint of the Kafka cluster, for example, + https://pkc-00000.us-central1.gcp.confluent.cloud:443). The + REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + topicName: + description: 'The name of the topic, for example, orders-1. The + topic name can be up to 249 characters in length, and can include + the following characters: a-z, A-Z, 0-9, . (dot), _ (underscore), + and - (dash). As a best practice, we recommend against using + any personally identifiable information (PII) when naming your + topic. The name of the topic, for example, `orders-1`.' + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. 
This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.topicName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.topicName) + || (has(self.initProvider) && has(self.initProvider.topicName))' + status: + description: KafkaTopicStatus defines the observed state of KafkaTopic. + properties: + atProvider: + properties: + config: + additionalProperties: + type: string + description: 'The custom topic settings to set: The custom topic + settings to set (e.g., `"cleanup.policy" = "compact"`).' + type: object + credentials: + description: 'supports the following: The Cluster API Credentials.' + items: + properties: + keySecretRef: + description: The Kafka API Key. The Cluster API Key for + your Confluent Cloud cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + secretSecretRef: + description: The Kafka API Secret. The Cluster API Secret + for your Confluent Cloud cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - keySecretRef + - secretSecretRef + type: object + type: array + id: + description: The ID of the Kafka cluster, for example, lkc-abc123. + type: string + kafkaCluster: + description: 'supports the following:' + items: + properties: + id: + description: The ID of the Kafka cluster, for example, lkc-abc123. + The Kafka cluster ID (e.g., `lkc-12345`). 
+ type: string + type: object + type: array + partitionsCount: + description: The number of partitions to create in the topic. + Defaults to 6. The number of partitions to create in the topic. + Defaults to `6`. + type: number + restEndpoint: + description: The REST endpoint of the Kafka cluster, for example, + https://pkc-00000.us-central1.gcp.confluent.cloud:443). The + REST endpoint of the Kafka cluster (e.g., `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + topicName: + description: 'The name of the topic, for example, orders-1. The + topic name can be up to 249 characters in length, and can include + the following characters: a-z, A-Z, 0-9, . (dot), _ (underscore), + and - (dash). As a best practice, we recommend against using + any personally identifiable information (PII) when naming your + topic. The name of the topic, for example, `orders-1`.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
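As a usage sketch (not part of the diff): the generated example manifest above can also be built with the Go types this patch introduces. The snippet below assumes only those types plus crossplane-runtime; the secret name "example-api-key" and the "basic-cluster" label are illustrative placeholders carried over from the generated example.

package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/crossplane-contrib/provider-confluent/apis/confluent/v1alpha1"
)

func strPtr(s string) *string   { return &s }
func f64Ptr(f float64) *float64 { return &f }

func main() {
	topic := &v1alpha1.KafkaTopic{
		ObjectMeta: metav1.ObjectMeta{Name: "orders"},
		Spec: v1alpha1.KafkaTopicSpec{
			ForProvider: v1alpha1.KafkaTopicParameters{
				TopicName:       strPtr("orders"),
				PartitionsCount: f64Ptr(6),
				Config:          map[string]*string{"cleanup.policy": strPtr("compact")},
				// ID is left nil; the generated ResolveReferences fills it in
				// from the Cluster selected below, via the reference
				// registered in config/confluent_kafka_topic/config.go.
				KafkaCluster: []v1alpha1.KafkaTopicKafkaClusterParameters{{
					IDSelector: &xpv1.Selector{
						MatchLabels: map[string]string{
							"testing.upbound.io/example-name": "basic-cluster",
						},
					},
				}},
				// Credentials point at a Kubernetes Secret; the key and
				// secret values never land in spec or status.
				Credentials: []v1alpha1.KafkaTopicCredentialsParameters{{
					KeySecretRef: xpv1.SecretKeySelector{
						SecretReference: xpv1.SecretReference{Name: "example-api-key", Namespace: "upbound-system"},
						Key:             "attribute.id",
					},
					SecretSecretRef: xpv1.SecretKeySelector{
						SecretReference: xpv1.SecretReference{Name: "example-api-key", Namespace: "upbound-system"},
						Key:             "attribute.secret",
					},
				}},
			},
		},
	}
	fmt.Println(topic.GetTerraformResourceType()) // prints: confluent_kafka_topic
}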