diff --git a/apis/confluent/v1alpha1/zz_generated.deepcopy.go b/apis/confluent/v1alpha1/zz_generated.deepcopy.go index 27397a1..b80389d 100644 --- a/apis/confluent/v1alpha1/zz_generated.deepcopy.go +++ b/apis/confluent/v1alpha1/zz_generated.deepcopy.go @@ -2796,6 +2796,409 @@ func (in *RoleBindingStatus) DeepCopy() *RoleBindingStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schema) DeepCopyInto(out *Schema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schema. +func (in *Schema) DeepCopy() *Schema { + if in == nil { + return nil + } + out := new(Schema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Schema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaCredentialsInitParameters) DeepCopyInto(out *SchemaCredentialsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaCredentialsInitParameters. +func (in *SchemaCredentialsInitParameters) DeepCopy() *SchemaCredentialsInitParameters { + if in == nil { + return nil + } + out := new(SchemaCredentialsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaCredentialsObservation) DeepCopyInto(out *SchemaCredentialsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaCredentialsObservation. +func (in *SchemaCredentialsObservation) DeepCopy() *SchemaCredentialsObservation { + if in == nil { + return nil + } + out := new(SchemaCredentialsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaCredentialsParameters) DeepCopyInto(out *SchemaCredentialsParameters) { + *out = *in + out.KeySecretRef = in.KeySecretRef + out.SecretSecretRef = in.SecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaCredentialsParameters. +func (in *SchemaCredentialsParameters) DeepCopy() *SchemaCredentialsParameters { + if in == nil { + return nil + } + out := new(SchemaCredentialsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaInitParameters) DeepCopyInto(out *SchemaInitParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]SchemaCredentialsInitParameters, len(*in)) + copy(*out, *in) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.HardDelete != nil { + in, out := &in.HardDelete, &out.HardDelete + *out = new(bool) + **out = **in + } + if in.RecreateOnUpdate != nil { + in, out := &in.RecreateOnUpdate, &out.RecreateOnUpdate + *out = new(bool) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.SchemaReference != nil { + in, out := &in.SchemaReference, &out.SchemaReference + *out = make([]SchemaReferenceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaRegistryCluster != nil { + in, out := &in.SchemaRegistryCluster, &out.SchemaRegistryCluster + *out = make([]SchemaRegistryClusterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubjectName != nil { + in, out := &in.SubjectName, &out.SubjectName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaInitParameters. +func (in *SchemaInitParameters) DeepCopy() *SchemaInitParameters { + if in == nil { + return nil + } + out := new(SchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaList) DeepCopyInto(out *SchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Schema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaList. +func (in *SchemaList) DeepCopy() *SchemaList { + if in == nil { + return nil + } + out := new(SchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaObservation) DeepCopyInto(out *SchemaObservation) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]SchemaCredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.HardDelete != nil { + in, out := &in.HardDelete, &out.HardDelete + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecreateOnUpdate != nil { + in, out := &in.RecreateOnUpdate, &out.RecreateOnUpdate + *out = new(bool) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.SchemaIdentifier != nil { + in, out := &in.SchemaIdentifier, &out.SchemaIdentifier + *out = new(float64) + **out = **in + } + if in.SchemaReference != nil { + in, out := &in.SchemaReference, &out.SchemaReference + *out = make([]SchemaReferenceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaRegistryCluster != nil { + in, out := &in.SchemaRegistryCluster, &out.SchemaRegistryCluster + *out = make([]SchemaRegistryClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubjectName != nil { + in, out := &in.SubjectName, &out.SubjectName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaObservation. +func (in *SchemaObservation) DeepCopy() *SchemaObservation { + if in == nil { + return nil + } + out := new(SchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaParameters) DeepCopyInto(out *SchemaParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]SchemaCredentialsParameters, len(*in)) + copy(*out, *in) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.HardDelete != nil { + in, out := &in.HardDelete, &out.HardDelete + *out = new(bool) + **out = **in + } + if in.RecreateOnUpdate != nil { + in, out := &in.RecreateOnUpdate, &out.RecreateOnUpdate + *out = new(bool) + **out = **in + } + if in.RestEndpoint != nil { + in, out := &in.RestEndpoint, &out.RestEndpoint + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.SchemaReference != nil { + in, out := &in.SchemaReference, &out.SchemaReference + *out = make([]SchemaReferenceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaRegistryCluster != nil { + in, out := &in.SchemaRegistryCluster, &out.SchemaRegistryCluster + *out = make([]SchemaRegistryClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubjectName != nil { + in, out := &in.SubjectName, &out.SubjectName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaParameters. 
+func (in *SchemaParameters) DeepCopy() *SchemaParameters { + if in == nil { + return nil + } + out := new(SchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaReferenceInitParameters) DeepCopyInto(out *SchemaReferenceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubjectName != nil { + in, out := &in.SubjectName, &out.SubjectName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReferenceInitParameters. +func (in *SchemaReferenceInitParameters) DeepCopy() *SchemaReferenceInitParameters { + if in == nil { + return nil + } + out := new(SchemaReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaReferenceObservation) DeepCopyInto(out *SchemaReferenceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubjectName != nil { + in, out := &in.SubjectName, &out.SubjectName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReferenceObservation. +func (in *SchemaReferenceObservation) DeepCopy() *SchemaReferenceObservation { + if in == nil { + return nil + } + out := new(SchemaReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaReferenceParameters) DeepCopyInto(out *SchemaReferenceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubjectName != nil { + in, out := &in.SubjectName, &out.SubjectName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReferenceParameters. +func (in *SchemaReferenceParameters) DeepCopy() *SchemaReferenceParameters { + if in == nil { + return nil + } + out := new(SchemaReferenceParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchemaRegistryCluster) DeepCopyInto(out *SchemaRegistryCluster) { *out = *in @@ -2885,6 +3288,26 @@ func (in *SchemaRegistryClusterEnvironmentParameters) DeepCopy() *SchemaRegistry // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchemaRegistryClusterInitParameters) DeepCopyInto(out *SchemaRegistryClusterInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterInitParameters. 
+func (in *SchemaRegistryClusterInitParameters) DeepCopy() *SchemaRegistryClusterInitParameters { + if in == nil { + return nil + } + out := new(SchemaRegistryClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaRegistryClusterInitParameters_2) DeepCopyInto(out *SchemaRegistryClusterInitParameters_2) { *out = *in if in.Environment != nil { in, out := &in.Environment, &out.Environment @@ -2907,12 +3330,12 @@ func (in *SchemaRegistryClusterInitParameters) DeepCopyInto(out *SchemaRegistryC } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterInitParameters. -func (in *SchemaRegistryClusterInitParameters) DeepCopy() *SchemaRegistryClusterInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterInitParameters_2. +func (in *SchemaRegistryClusterInitParameters_2) DeepCopy() *SchemaRegistryClusterInitParameters_2 { if in == nil { return nil } - out := new(SchemaRegistryClusterInitParameters) + out := new(SchemaRegistryClusterInitParameters_2) in.DeepCopyInto(out) return out } @@ -2951,6 +3374,26 @@ func (in *SchemaRegistryClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchemaRegistryClusterObservation) DeepCopyInto(out *SchemaRegistryClusterObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterObservation. +func (in *SchemaRegistryClusterObservation) DeepCopy() *SchemaRegistryClusterObservation { + if in == nil { + return nil + } + out := new(SchemaRegistryClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaRegistryClusterObservation_2) DeepCopyInto(out *SchemaRegistryClusterObservation_2) { *out = *in if in.APIVersion != nil { in, out := &in.APIVersion, &out.APIVersion @@ -3003,18 +3446,38 @@ func (in *SchemaRegistryClusterObservation) DeepCopyInto(out *SchemaRegistryClus } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterObservation. -func (in *SchemaRegistryClusterObservation) DeepCopy() *SchemaRegistryClusterObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterObservation_2. +func (in *SchemaRegistryClusterObservation_2) DeepCopy() *SchemaRegistryClusterObservation_2 { if in == nil { return nil } - out := new(SchemaRegistryClusterObservation) + out := new(SchemaRegistryClusterObservation_2) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchemaRegistryClusterParameters) DeepCopyInto(out *SchemaRegistryClusterParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterParameters. 
+func (in *SchemaRegistryClusterParameters) DeepCopy() *SchemaRegistryClusterParameters { + if in == nil { + return nil + } + out := new(SchemaRegistryClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaRegistryClusterParameters_2) DeepCopyInto(out *SchemaRegistryClusterParameters_2) { *out = *in if in.Environment != nil { in, out := &in.Environment, &out.Environment @@ -3037,12 +3500,12 @@ func (in *SchemaRegistryClusterParameters) DeepCopyInto(out *SchemaRegistryClust } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterParameters. -func (in *SchemaRegistryClusterParameters) DeepCopy() *SchemaRegistryClusterParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryClusterParameters_2. +func (in *SchemaRegistryClusterParameters_2) DeepCopy() *SchemaRegistryClusterParameters_2 { if in == nil { return nil } - out := new(SchemaRegistryClusterParameters) + out := new(SchemaRegistryClusterParameters_2) in.DeepCopyInto(out) return out } @@ -3082,6 +3545,41 @@ func (in *SchemaRegistryClusterStatus) DeepCopy() *SchemaRegistryClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaSpec) DeepCopyInto(out *SchemaSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaSpec. +func (in *SchemaSpec) DeepCopy() *SchemaSpec { + if in == nil { + return nil + } + out := new(SchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaStatus) DeepCopyInto(out *SchemaStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaStatus. +func (in *SchemaStatus) DeepCopy() *SchemaStatus { + if in == nil { + return nil + } + out := new(SchemaStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { *out = *in diff --git a/apis/confluent/v1alpha1/zz_generated.managed.go b/apis/confluent/v1alpha1/zz_generated.managed.go index 974bb54..7e5bb0d 100644 --- a/apis/confluent/v1alpha1/zz_generated.managed.go +++ b/apis/confluent/v1alpha1/zz_generated.managed.go @@ -427,6 +427,66 @@ func (mg *RoleBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReferen mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this Schema. +func (mg *Schema) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Schema. +func (mg *Schema) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Schema. +func (mg *Schema) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Schema. 
+func (mg *Schema) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Schema. +func (mg *Schema) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Schema. +func (mg *Schema) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Schema. +func (mg *Schema) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Schema. +func (mg *Schema) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Schema. +func (mg *Schema) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Schema. +func (mg *Schema) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Schema. +func (mg *Schema) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Schema. +func (mg *Schema) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this SchemaRegistryCluster. func (mg *SchemaRegistryCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) diff --git a/apis/confluent/v1alpha1/zz_generated.managedlist.go b/apis/confluent/v1alpha1/zz_generated.managedlist.go index 3c51f84..2962a35 100644 --- a/apis/confluent/v1alpha1/zz_generated.managedlist.go +++ b/apis/confluent/v1alpha1/zz_generated.managedlist.go @@ -70,6 +70,15 @@ func (l *RoleBindingList) GetItems() []resource.Managed { return items } +// GetItems of this SchemaList. +func (l *SchemaList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this SchemaRegistryClusterList. 
func (l *SchemaRegistryClusterList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/confluent/v1alpha1/zz_generated_terraformed.go b/apis/confluent/v1alpha1/zz_generated_terraformed.go index df20b7c..9e18982 100755 --- a/apis/confluent/v1alpha1/zz_generated_terraformed.go +++ b/apis/confluent/v1alpha1/zz_generated_terraformed.go @@ -606,6 +606,90 @@ func (tr *RoleBinding) GetTerraformSchemaVersion() int { return 0 } +// GetTerraformResourceType returns Terraform resource type for this Schema +func (mg *Schema) GetTerraformResourceType() string { + return "confluent_schema" +} + +// GetConnectionDetailsMapping for this Schema +func (tr *Schema) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"credentials[*].key": "spec.forProvider.credentials[*].keySecretRef", "credentials[*].secret": "spec.forProvider.credentials[*].secretSecretRef"} +} + +// GetObservation of this Schema +func (tr *Schema) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Schema +func (tr *Schema) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Schema +func (tr *Schema) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Schema +func (tr *Schema) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Schema +func (tr *Schema) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Schema +func (tr *Schema) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this Schema using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Schema) LateInitialize(attrs []byte) (bool, error) { + params := &SchemaParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Schema) GetTerraformSchemaVersion() int { + return 0 +} + // GetTerraformResourceType returns Terraform resource type for this SchemaRegistryCluster func (mg *SchemaRegistryCluster) GetTerraformResourceType() string { return "confluent_schema_registry_cluster" @@ -675,7 +759,7 @@ func (tr *SchemaRegistryCluster) GetInitParameters() (map[string]any, error) { // LateInitialize this SchemaRegistryCluster using its observed tfState. // returns True if there are any spec changes for the resource. func (tr *SchemaRegistryCluster) LateInitialize(attrs []byte) (bool, error) { - params := &SchemaRegistryClusterParameters{} + params := &SchemaRegistryClusterParameters_2{} if err := json.TFParser.Unmarshal(attrs, params); err != nil { return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") } diff --git a/apis/confluent/v1alpha1/zz_schema_types.go b/apis/confluent/v1alpha1/zz_schema_types.go new file mode 100755 index 0000000..3848408 --- /dev/null +++ b/apis/confluent/v1alpha1/zz_schema_types.go @@ -0,0 +1,303 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SchemaCredentialsInitParameters struct { +} + +type SchemaCredentialsObservation struct { +} + +type SchemaCredentialsParameters struct { + + // The Schema Registry API Key. + // The Cluster API Key for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + KeySecretRef v1.SecretKeySelector `json:"keySecretRef" tf:"-"` + + // The Schema Registry API Secret. + // The Cluster API Secret for your Confluent Cloud cluster. + // +kubebuilder:validation:Required + SecretSecretRef v1.SecretKeySelector `json:"secretSecretRef" tf:"-"` +} + +type SchemaInitParameters struct { + + // supports the following: + // The Cluster API Credentials. + Credentials []SchemaCredentialsInitParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The format of the schema. Accepted values are: AVRO, PROTOBUF, and JSON. + // The format of the Schema. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // An optional flag to control whether a schema should be soft or hard deleted. Set it to true if you want to hard delete a schema on destroy (see Schema Deletion Guidelines for more details). Must be unset when importing. Defaults to false (soft delete). + // Controls whether a schema should be soft or hard deleted. Set it to `true` if you want to hard delete a schema on destroy. Defaults to `false` (soft delete). Defaults to `false`. + HardDelete *bool `json:"hardDelete,omitempty" tf:"hard_delete,omitempty"` + + // An optional flag to control whether a schema should be recreated on an update. Set it to true if you want to manage different schema versions using different resource instances. Must be set to the target value when importing. Defaults to false, which manages the latest schema version only. The resource instance always points to the latest schema version by supporting in-place updates. + // Controls whether a schema should be recreated on update. Defaults to `false`. 
+ RecreateOnUpdate *bool `json:"recreateOnUpdate,omitempty" tf:"recreate_on_update,omitempty"` + + // The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443). + // The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` + + // The schema string, for example, file("./schema_version_1.avsc"). + // The definition of the Schema. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // The list of referenced schemas (see Schema References for more details): + // The list of references to other Schemas. + SchemaReference []SchemaReferenceInitParameters `json:"schemaReference,omitempty" tf:"schema_reference,omitempty"` + + // supports the following: + SchemaRegistryCluster []SchemaRegistryClusterInitParameters `json:"schemaRegistryCluster,omitempty" tf:"schema_registry_cluster,omitempty"` + + // The name for the reference. (For Avro Schema, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf Schema, it is the name of another Protobuf file.) + // The name of the Schema Registry Subject. + SubjectName *string `json:"subjectName,omitempty" tf:"subject_name,omitempty"` +} + +type SchemaObservation struct { + + // supports the following: + // The Cluster API Credentials. + Credentials []SchemaCredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The format of the schema. Accepted values are: AVRO, PROTOBUF, and JSON. + // The format of the Schema. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // An optional flag to control whether a schema should be soft or hard deleted. Set it to true if you want to hard delete a schema on destroy (see Schema Deletion Guidelines for more details). Must be unset when importing. Defaults to false (soft delete). + // Controls whether a schema should be soft or hard deleted. Set it to `true` if you want to hard delete a schema on destroy. Defaults to `false` (soft delete). Defaults to `false`. + HardDelete *bool `json:"hardDelete,omitempty" tf:"hard_delete,omitempty"` + + // The ID of the Schema Registry cluster, for example, lsrc-abc123. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An optional flag to control whether a schema should be recreated on an update. Set it to true if you want to manage different schema versions using different resource instances. Must be set to the target value when importing. Defaults to false, which manages the latest schema version only. The resource instance always points to the latest schema version by supporting in-place updates. + // Controls whether a schema should be recreated on update. Defaults to `false`. + RecreateOnUpdate *bool `json:"recreateOnUpdate,omitempty" tf:"recreate_on_update,omitempty"` + + // The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443). + // The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` + + // The schema string, for example, file("./schema_version_1.avsc"). + // The definition of the Schema. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // The globally unique ID of the Schema, for example, 100003. 
If the same schema is registered under a different subject, the same identifier will be returned. However, the version of the schema may be different under different subjects. + // Globally unique identifier of the Schema returned for a creation request. It should be used to retrieve this schema from the schemas resource and is different from the schema’s version which is associated with the subject. + SchemaIdentifier *float64 `json:"schemaIdentifier,omitempty" tf:"schema_identifier,omitempty"` + + // The list of referenced schemas (see Schema References for more details): + // The list of references to other Schemas. + SchemaReference []SchemaReferenceObservation `json:"schemaReference,omitempty" tf:"schema_reference,omitempty"` + + // supports the following: + SchemaRegistryCluster []SchemaRegistryClusterObservation `json:"schemaRegistryCluster,omitempty" tf:"schema_registry_cluster,omitempty"` + + // The name for the reference. (For Avro Schema, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf Schema, it is the name of another Protobuf file.) + // The name of the Schema Registry Subject. + SubjectName *string `json:"subjectName,omitempty" tf:"subject_name,omitempty"` + + // The version, representing the exact version of the schema under the registered subject. + // The version number of the Schema. + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type SchemaParameters struct { + + // supports the following: + // The Cluster API Credentials. + // +kubebuilder:validation:Optional + Credentials []SchemaCredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The format of the schema. Accepted values are: AVRO, PROTOBUF, and JSON. + // The format of the Schema. + // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // An optional flag to control whether a schema should be soft or hard deleted. Set it to true if you want to hard delete a schema on destroy (see Schema Deletion Guidelines for more details). Must be unset when importing. Defaults to false (soft delete). + // Controls whether a schema should be soft or hard deleted. Set it to `true` if you want to hard delete a schema on destroy. Defaults to `false` (soft delete). Defaults to `false`. + // +kubebuilder:validation:Optional + HardDelete *bool `json:"hardDelete,omitempty" tf:"hard_delete,omitempty"` + + // An optional flag to control whether a schema should be recreated on an update. Set it to true if you want to manage different schema versions using different resource instances. Must be set to the target value when importing. Defaults to false, which manages the latest schema version only. The resource instance always points to the latest schema version by supporting in-place updates. + // Controls whether a schema should be recreated on update. Defaults to `false`. + // +kubebuilder:validation:Optional + RecreateOnUpdate *bool `json:"recreateOnUpdate,omitempty" tf:"recreate_on_update,omitempty"` + + // The REST endpoint of the Schema Registry cluster, for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443). + // The REST endpoint of the Schema Registry cluster, for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`). + // +kubebuilder:validation:Optional + RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` + + // The schema string, for example, file("./schema_version_1.avsc"). + // The definition of the Schema. 
+ // +kubebuilder:validation:Optional + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // The list of referenced schemas (see Schema References for more details): + // The list of references to other Schemas. + // +kubebuilder:validation:Optional + SchemaReference []SchemaReferenceParameters `json:"schemaReference,omitempty" tf:"schema_reference,omitempty"` + + // supports the following: + // +kubebuilder:validation:Optional + SchemaRegistryCluster []SchemaRegistryClusterParameters `json:"schemaRegistryCluster,omitempty" tf:"schema_registry_cluster,omitempty"` + + // The name for the reference. (For Avro Schema, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf Schema, it is the name of another Protobuf file.) + // The name of the Schema Registry Subject. + // +kubebuilder:validation:Optional + SubjectName *string `json:"subjectName,omitempty" tf:"subject_name,omitempty"` +} + +type SchemaReferenceInitParameters struct { + + // The name of the subject, representing the subject under which the referenced schema is registered. + // The name of the Schema references (for example, "io.confluent.kafka.example.User"). For Avro, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf, it is the name of another Protobuf file. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name for the reference. (For Avro Schema, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf Schema, it is the name of another Protobuf file.) + // The name of the referenced Schema Registry Subject (for example, "User"). + SubjectName *string `json:"subjectName,omitempty" tf:"subject_name,omitempty"` + + // The version, representing the exact version of the schema under the registered subject. + // The version of the referenced Schema. + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type SchemaReferenceObservation struct { + + // The name of the subject, representing the subject under which the referenced schema is registered. + // The name of the Schema references (for example, "io.confluent.kafka.example.User"). For Avro, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf, it is the name of another Protobuf file. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name for the reference. (For Avro Schema, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf Schema, it is the name of another Protobuf file.) + // The name of the referenced Schema Registry Subject (for example, "User"). + SubjectName *string `json:"subjectName,omitempty" tf:"subject_name,omitempty"` + + // The version, representing the exact version of the schema under the registered subject. + // The version of the referenced Schema. + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type SchemaReferenceParameters struct { + + // The name of the subject, representing the subject under which the referenced schema is registered. + // The name of the Schema references (for example, "io.confluent.kafka.example.User"). For Avro, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf, it is the name of another Protobuf file. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name for the reference. 
(For Avro Schema, the reference name is the fully qualified schema name, for JSON Schema it is a URL, and for Protobuf Schema, it is the name of another Protobuf file.) + // The name of the referenced Schema Registry Subject (for example, "User"). + // +kubebuilder:validation:Optional + SubjectName *string `json:"subjectName" tf:"subject_name,omitempty"` + + // The version, representing the exact version of the schema under the registered subject. + // The version of the referenced Schema. + // +kubebuilder:validation:Optional + Version *float64 `json:"version" tf:"version,omitempty"` +} + +type SchemaRegistryClusterInitParameters struct { + + // The ID of the Schema Registry cluster, for example, lsrc-abc123. + // The Schema Registry cluster ID (e.g., `lsrc-abc123`). + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type SchemaRegistryClusterObservation struct { + + // The ID of the Schema Registry cluster, for example, lsrc-abc123. + // The Schema Registry cluster ID (e.g., `lsrc-abc123`). + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type SchemaRegistryClusterParameters struct { + + // The ID of the Schema Registry cluster, for example, lsrc-abc123. + // The Schema Registry cluster ID (e.g., `lsrc-abc123`). + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` +} + +// SchemaSpec defines the desired state of Schema +type SchemaSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SchemaParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SchemaInitParameters `json:"initProvider,omitempty"` +} + +// SchemaStatus defines the observed state of Schema. +type SchemaStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SchemaObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Schema is the Schema for the Schemas API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,confluent} +type Schema struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.format) || (has(self.initProvider) && has(self.initProvider.format))",message="spec.forProvider.format is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subjectName) || (has(self.initProvider) && has(self.initProvider.subjectName))",message="spec.forProvider.subjectName is a required parameter" + Spec SchemaSpec `json:"spec"` + Status SchemaStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SchemaList contains a list of Schemas +type SchemaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Schema `json:"items"` +} + +// Repository type metadata. +var ( + Schema_Kind = "Schema" + Schema_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Schema_Kind}.String() + Schema_KindAPIVersion = Schema_Kind + "." + CRDGroupVersion.String() + Schema_GroupVersionKind = CRDGroupVersion.WithKind(Schema_Kind) +) + +func init() { + SchemeBuilder.Register(&Schema{}, &SchemaList{}) +} diff --git a/apis/confluent/v1alpha1/zz_schemaregistrycluster_types.go b/apis/confluent/v1alpha1/zz_schemaregistrycluster_types.go index bdfea2a..874a4c5 100755 --- a/apis/confluent/v1alpha1/zz_schemaregistrycluster_types.go +++ b/apis/confluent/v1alpha1/zz_schemaregistrycluster_types.go @@ -61,7 +61,7 @@ type SchemaRegistryClusterEnvironmentParameters struct { ID *string `json:"id" tf:"id,omitempty"` } -type SchemaRegistryClusterInitParameters struct { +type SchemaRegistryClusterInitParameters_2 struct { // supports the following: // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. @@ -75,7 +75,7 @@ type SchemaRegistryClusterInitParameters struct { Region []RegionInitParameters `json:"region,omitempty" tf:"region,omitempty"` } -type SchemaRegistryClusterObservation struct { +type SchemaRegistryClusterObservation_2 struct { // An API Version of the schema version of the Schema Registry cluster, for example, srcm/v2. // API Version defines the schema version of this representation of a Schema Registry Cluster. @@ -112,7 +112,7 @@ type SchemaRegistryClusterObservation struct { RestEndpoint *string `json:"restEndpoint,omitempty" tf:"rest_endpoint,omitempty"` } -type SchemaRegistryClusterParameters struct { +type SchemaRegistryClusterParameters_2 struct { // supports the following: // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. 
@@ -132,7 +132,7 @@ type SchemaRegistryClusterParameters struct { // SchemaRegistryClusterSpec defines the desired state of SchemaRegistryCluster type SchemaRegistryClusterSpec struct { v1.ResourceSpec `json:",inline"` - ForProvider SchemaRegistryClusterParameters `json:"forProvider"` + ForProvider SchemaRegistryClusterParameters_2 `json:"forProvider"` // THIS IS A BETA FIELD. It will be honored // unless the Management Policies feature flag is disabled. // InitProvider holds the same fields as ForProvider, with the exception @@ -143,13 +143,13 @@ type SchemaRegistryClusterSpec struct { // required on creation, but we do not desire to update them after creation, // for example because of an external controller is managing them, like an // autoscaler. - InitProvider SchemaRegistryClusterInitParameters `json:"initProvider,omitempty"` + InitProvider SchemaRegistryClusterInitParameters_2 `json:"initProvider,omitempty"` } // SchemaRegistryClusterStatus defines the observed state of SchemaRegistryCluster. type SchemaRegistryClusterStatus struct { v1.ResourceStatus `json:",inline"` - AtProvider SchemaRegistryClusterObservation `json:"atProvider,omitempty"` + AtProvider SchemaRegistryClusterObservation_2 `json:"atProvider,omitempty"` } // +kubebuilder:object:root=true diff --git a/config/external_name.go b/config/external_name.go index 9418198..40f3822 100644 --- a/config/external_name.go +++ b/config/external_name.go @@ -18,6 +18,7 @@ var ExternalNameConfigs = map[string]config.ExternalName{ "confluent_kafka_acl": config.IdentifierFromProvider, "confluent_kafka_topic": config.IdentifierFromProvider, "confluent_role_binding": config.IdentifierFromProvider, + "confluent_schema": config.IdentifierFromProvider, "confluent_schema_registry_cluster": config.IdentifierFromProvider, } diff --git a/config/generated.lst b/config/generated.lst index 1219ec5..e4991c5 100644 --- a/config/generated.lst +++ b/config/generated.lst @@ -1 +1 @@ -["confluent_api_key","confluent_environment","confluent_kafka_acl","confluent_kafka_cluster","confluent_kafka_cluster_config","confluent_kafka_topic","confluent_role_binding","confluent_schema_registry_cluster","confluent_service_account"] \ No newline at end of file +["confluent_api_key","confluent_environment","confluent_kafka_acl","confluent_kafka_cluster","confluent_kafka_cluster_config","confluent_kafka_topic","confluent_role_binding","confluent_schema","confluent_schema_registry_cluster","confluent_service_account"] \ No newline at end of file diff --git a/examples-generated/confluent/schema.yaml b/examples-generated/confluent/schema.yaml new file mode 100644 index 0000000..ca40956 --- /dev/null +++ b/examples-generated/confluent/schema.yaml @@ -0,0 +1,25 @@ +apiVersion: confluent.crossplane.io/v1alpha1 +kind: Schema +metadata: + annotations: + meta.upbound.io/example-id: confluent/v1alpha1/schema + labels: + testing.upbound.io/example-name: avro-purchase + name: avro-purchase +spec: + forProvider: + credentials: + - keySecretRef: + key: example-key + name: example-secret + namespace: upbound-system + secretSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + format: AVRO + restEndpoint: ${confluent_schema_registry_cluster.essentials.rest_endpoint} + schema: ${file("./schemas/avro/purchase.avsc")} + schemaRegistryCluster: + - id: ${confluent_schema_registry_cluster.essentials.id} + subjectName: avro-purchase-value diff --git a/internal/controller/confluent/schema/zz_controller.go b/internal/controller/confluent/schema/zz_controller.go new 
file mode 100755 index 0000000..1ccf933 --- /dev/null +++ b/internal/controller/confluent/schema/zz_controller.go @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package schema + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/provider-confluent/apis/confluent/v1alpha1" + features "github.com/crossplane-contrib/provider-confluent/internal/features" +) + +// Setup adds a controller that reconciles Schema managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Schema_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Schema_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Schema_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["confluent_schema"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Schema_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Schema{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index 61c6560..ed8a6cf 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -16,6 +16,7 @@ import ( kafkaacl "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/kafkaacl" kafkatopic "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/kafkatopic" rolebinding "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/rolebinding" + schema "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/schema" schemaregistrycluster "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/schemaregistrycluster" serviceaccount "github.com/crossplane-contrib/provider-confluent/internal/controller/confluent/serviceaccount" providerconfig "github.com/crossplane-contrib/provider-confluent/internal/controller/providerconfig" @@ -32,6 +33,7 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { kafkaacl.Setup, kafkatopic.Setup, rolebinding.Setup, + schema.Setup, schemaregistrycluster.Setup, serviceaccount.Setup, providerconfig.Setup, diff --git a/package/crds/confluent.crossplane.io_schemas.yaml b/package/crds/confluent.crossplane.io_schemas.yaml new file mode 100644 index 0000000..542ed3b --- /dev/null +++ b/package/crds/confluent.crossplane.io_schemas.yaml @@ -0,0 +1,646 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: schemas.confluent.crossplane.io +spec: + group: confluent.crossplane.io + names: + categories: + - crossplane + - managed + - confluent + kind: Schema + listKind: SchemaList + plural: schemas + singular: schema + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Schema is the Schema for the Schemas API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SchemaSpec defines the desired state of Schema + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. 
Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + credentials: + description: 'supports the following: The Cluster API Credentials.' + items: + properties: + keySecretRef: + description: The Schema Registry API Key. The Cluster API + Key for your Confluent Cloud cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + secretSecretRef: + description: The Schema Registry API Secret. The Cluster + API Secret for your Confluent Cloud cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - keySecretRef + - secretSecretRef + type: object + type: array + format: + description: 'The format of the schema. Accepted values are: AVRO, + PROTOBUF, and JSON. The format of the Schema.' + type: string + hardDelete: + description: An optional flag to control whether a schema should + be soft or hard deleted. Set it to true if you want to hard + delete a schema on destroy (see Schema Deletion Guidelines for + more details). Must be unset when importing. Defaults to false + (soft delete). Controls whether a schema should be soft or hard + deleted. Set it to `true` if you want to hard delete a schema + on destroy. Defaults to `false` (soft delete). Defaults to `false`. + type: boolean + recreateOnUpdate: + description: An optional flag to control whether a schema should + be recreated on an update. Set it to true if you want to manage + different schema versions using different resource instances. + Must be set to the target value when importing. Defaults to + false, which manages the latest schema version only. The resource + instance always points to the latest schema version by supporting + in-place updates. Controls whether a schema should be recreated + on update. Defaults to `false`. + type: boolean + restEndpoint: + description: The REST endpoint of the Schema Registry cluster, + for example, https://psrc-00000.us-central1.gcp.confluent.cloud:443). + The REST endpoint of the Schema Registry cluster, for example, + `https://psrc-00000.us-central1.gcp.confluent.cloud:443`). + type: string + schema: + description: The schema string, for example, file("./schema_version_1.avsc"). + The definition of the Schema. + type: string + schemaReference: + description: 'The list of referenced schemas (see Schema References + for more details): The list of references to other Schemas.' + items: + properties: + name: + description: The name of the subject, representing the subject + under which the referenced schema is registered. The name + of the Schema references (for example, "io.confluent.kafka.example.User"). + For Avro, the reference name is the fully qualified schema + name, for JSON Schema it is a URL, and for Protobuf, it + is the name of another Protobuf file. + type: string + subjectName: + description: The name for the reference. 
+                          type: string
+                        version:
+                          description: The exact version of the referenced schema
+                            under the registered subject.
+                          type: number
+                      type: object
+                    type: array
+                  schemaRegistryCluster:
+                    description: The Schema Registry cluster configuration block.
+                    items:
+                      properties:
+                        id:
+                          description: The ID of the Schema Registry cluster, for
+                            example, `lsrc-abc123`.
+                          type: string
+                      type: object
+                    type: array
+                  subjectName:
+                    description: The name of the Schema Registry Subject.
+                    type: string
+                type: object
+              initProvider:
+                description: THIS IS A BETA FIELD. It will be honored unless the Management
+                  Policies feature flag is disabled. InitProvider holds the same fields
+                  as ForProvider, with the exception of Identifier and other resource
+                  reference fields. The fields that are in InitProvider are merged
+                  into ForProvider when the resource is created. The same fields are
+                  also added to the terraform ignore_changes hook, to avoid updating
+                  them after creation. This is useful for fields that are required
+                  on creation, but we do not desire to update them after creation,
+                  for example because an external controller is managing them, like
+                  an autoscaler.
+                properties:
+                  credentials:
+                    description: The Cluster API Credentials.
+                    items:
+                      type: object
+                    type: array
+                  format:
+                    description: 'The format of the schema. Accepted values are: AVRO,
+                      PROTOBUF, and JSON.'
+                    type: string
+                  hardDelete:
+                    description: An optional flag to control whether a schema should
+                      be soft or hard deleted. Set it to `true` if you want to hard
+                      delete a schema on destroy (see Schema Deletion Guidelines for
+                      more details). Must be unset when importing. Defaults to `false`
+                      (soft delete).
+                    type: boolean
+                  recreateOnUpdate:
+                    description: An optional flag to control whether a schema should
+                      be recreated on an update. Set it to `true` if you want to manage
+                      different schema versions using different resource instances.
+                      Must be set to the target value when importing. Defaults to
+                      `false`, which manages the latest schema version only; the resource
+                      instance always points to the latest schema version by supporting
+                      in-place updates.
+                    type: boolean
+                  restEndpoint:
+                    description: The REST endpoint of the Schema Registry cluster,
+                      for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`.
+                    type: string
+                  schema:
+                    description: The schema string (the definition of the Schema),
+                      for example, file("./schema_version_1.avsc").
+                    type: string
+                  schemaReference:
+                    description: The list of references to other Schemas (see Schema
+                      References for more details).
+                    items:
+                      properties:
+                        name:
+                          description: The name of the reference (for example, "io.confluent.kafka.example.User").
+                            For Avro, the reference name is the fully qualified schema
+                            name; for JSON Schema it is a URL; and for Protobuf, it
+                            is the name of another Protobuf file.
+                          type: string
+                        subjectName:
+                          description: The name of the referenced Schema Registry
+                            Subject, representing the subject under which the referenced
+                            schema is registered (for example, "User").
+                          type: string
+                        version:
+                          description: The exact version of the referenced schema
+                            under the registered subject.
+                          type: number
+                      type: object
+                    type: array
+                  schemaRegistryCluster:
+                    description: The Schema Registry cluster configuration block.
+                    items:
+                      properties:
+                        id:
+                          description: The ID of the Schema Registry cluster, for
+                            example, `lsrc-abc123`.
+                          type: string
+                      type: object
+                    type: array
+                  subjectName:
+                    description: The name of the Schema Registry Subject.
+                    type: string
+                type: object
+              managementPolicies:
+                default:
+                - '*'
+                description: 'THIS IS A BETA FIELD. It is on by default but can be
+                  opted out of through a Crossplane feature flag. ManagementPolicies
+                  specify the array of actions Crossplane is allowed to take on the
+                  managed and external resources. This field is planned to replace
+                  the DeletionPolicy field in a future release. Currently, both could
+                  be set independently and non-default values would be honored if
+                  the feature flag is enabled. If both are custom, the DeletionPolicy
+                  field will be ignored. See the design doc for more information:
+                  https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
+                  and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md'
+                items:
+                  description: A ManagementAction represents an action that the Crossplane
+                    controllers can take on an external resource.
+                  enum:
+                  - Observe
+                  - Create
+                  - Update
+                  - Delete
+                  - LateInitialize
+                  - '*'
+                  type: string
+                type: array
+              providerConfigRef:
+                default:
+                  name: default
+                description: ProviderConfigReference specifies how the provider that
+                  will be used to create, observe, update, and delete this managed
+                  resource should be configured.
+                properties:
+                  name:
+                    description: Name of the referenced object.
+                    type: string
+                  policy:
+                    description: Policies for referencing.
+                    properties:
+                      resolution:
+                        default: Required
+                        description: Resolution specifies whether resolution of this
+                          reference is required. The default is 'Required', which
+                          means the reconcile will fail if the reference cannot be
+                          resolved. 'Optional' means this reference will be a no-op
+                          if it cannot be resolved.
+                        enum:
+                        - Required
+                        - Optional
+                        type: string
+                      resolve:
+                        description: Resolve specifies when this reference should
+                          be resolved. The default is 'IfNotPresent', which will attempt
+                          to resolve the reference only when the corresponding field
+                          is not present. Use 'Always' to resolve the reference on
+                          every reconcile.
+                        enum:
+                        - Always
+                        - IfNotPresent
+                        type: string
+                    type: object
+                required:
+                - name
+                type: object
+              publishConnectionDetailsTo:
+                description: PublishConnectionDetailsTo specifies the connection secret
+                  config which contains a name, metadata and a reference to secret
+                  store config to which any connection details for this managed resource
+                  should be written. Connection details frequently include the endpoint,
+                  username, and password required to connect to the managed resource.
+                properties:
+                  configRef:
+                    default:
+                      name: default
+                    description: SecretStoreConfigRef specifies which secret store
+                      config should be used for this ConnectionSecret.
+                    properties:
+                      name:
+                        description: Name of the referenced object.
+                        type: string
+                      policy:
+                        description: Policies for referencing.
+                        properties:
+                          resolution:
+                            default: Required
+                            description: Resolution specifies whether resolution of
+                              this reference is required. The default is 'Required',
+                              which means the reconcile will fail if the reference
+                              cannot be resolved. 'Optional' means this reference
+                              will be a no-op if it cannot be resolved.
+                            enum:
+                            - Required
+                            - Optional
+                            type: string
+                          resolve:
+                            description: Resolve specifies when this reference should
+                              be resolved. The default is 'IfNotPresent', which will
+                              attempt to resolve the reference only when the corresponding
+                              field is not present. Use 'Always' to resolve the reference
+                              on every reconcile.
+                            enum:
+                            - Always
+                            - IfNotPresent
+                            type: string
+                        type: object
+                    required:
+                    - name
+                    type: object
+                  metadata:
+                    description: Metadata is the metadata for connection secret.
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: Annotations are the annotations to be added to
+                          connection secret. - For Kubernetes secrets, this will be
+                          used as "metadata.annotations". - It is up to the Secret
+                          Store implementation for other store types.
+                        type: object
+                      labels:
+                        additionalProperties:
+                          type: string
+                        description: Labels are the labels/tags to be added to connection
+                          secret. - For Kubernetes secrets, this will be used as "metadata.labels".
+                          - It is up to the Secret Store implementation for other
+                          store types.
+                        type: object
+                      type:
+                        description: Type is the SecretType for the connection secret.
+                          - Only valid for Kubernetes Secret Stores.
+                        type: string
+                    type: object
+                  name:
+                    description: Name is the name of the connection secret.
+                    type: string
+                required:
+                - name
+                type: object
+              writeConnectionSecretToRef:
+                description: WriteConnectionSecretToReference specifies the namespace
+                  and name of a Secret to which any connection details for this managed
+                  resource should be written. Connection details frequently include
+                  the endpoint, username, and password required to connect to the
+                  managed resource. This field is planned to be replaced in a future
+                  release in favor of PublishConnectionDetailsTo. Currently, both
+                  could be set independently and connection details would be published
+                  to both without affecting each other.
+                properties:
+                  name:
+                    description: Name of the secret.
+                    type: string
+                  namespace:
+                    description: Namespace of the secret.
+                    type: string
+                required:
+                - name
+                - namespace
+                type: object
+            required:
+            - forProvider
+            type: object
+            x-kubernetes-validations:
+            - message: spec.forProvider.format is a required parameter
+              rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+                || ''Update'' in self.managementPolicies) || has(self.forProvider.format)
+                || (has(self.initProvider) && has(self.initProvider.format))'
+            - message: spec.forProvider.subjectName is a required parameter
+              rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies
+                || ''Update'' in self.managementPolicies) || has(self.forProvider.subjectName)
+                || (has(self.initProvider) && has(self.initProvider.subjectName))'
+          status:
+            description: SchemaStatus defines the observed state of Schema.
+            properties:
+              atProvider:
+                properties:
+                  credentials:
+                    description: The Cluster API Credentials.
+                    items:
+                      properties:
+                        keySecretRef:
+                          description: The Schema Registry API Key for your Confluent
+                            Cloud cluster.
+                          properties:
+                            key:
+                              description: The key to select.
+                              type: string
+                            name:
+                              description: Name of the secret.
+                              type: string
+                            namespace:
+                              description: Namespace of the secret.
+                              type: string
+                          required:
+                          - key
+                          - name
+                          - namespace
+                          type: object
+                        secretSecretRef:
+                          description: The Schema Registry API Secret for your Confluent
+                            Cloud cluster.
+                          properties:
+                            key:
+                              description: The key to select.
+                              type: string
+                            name:
+                              description: Name of the secret.
+                              type: string
+                            namespace:
+                              description: Namespace of the secret.
+                              type: string
+                          required:
+                          - key
+                          - name
+                          - namespace
+                          type: object
+                      required:
+                      - keySecretRef
+                      - secretSecretRef
+                      type: object
+                    type: array
+                  format:
+                    description: 'The format of the schema. Accepted values are: AVRO,
+                      PROTOBUF, and JSON.'
+                    type: string
+                  hardDelete:
+                    description: An optional flag to control whether a schema should
+                      be soft or hard deleted. Set it to `true` if you want to hard
+                      delete a schema on destroy (see Schema Deletion Guidelines for
+                      more details). Must be unset when importing. Defaults to `false`
+                      (soft delete).
+                    type: boolean
+                  id:
+                    description: The ID of the Schema Registry cluster, for example,
+                      `lsrc-abc123`.
+                    type: string
+                  recreateOnUpdate:
+                    description: An optional flag to control whether a schema should
+                      be recreated on an update. Set it to `true` if you want to manage
+                      different schema versions using different resource instances.
+                      Must be set to the target value when importing. Defaults to
+                      `false`, which manages the latest schema version only; the resource
+                      instance always points to the latest schema version by supporting
+                      in-place updates.
+                    type: boolean
+                  restEndpoint:
+                    description: The REST endpoint of the Schema Registry cluster,
+                      for example, `https://psrc-00000.us-central1.gcp.confluent.cloud:443`.
+                    type: string
+                  schema:
+                    description: The schema string (the definition of the Schema),
+                      for example, file("./schema_version_1.avsc").
+                    type: string
+                  schemaIdentifier:
+                    description: The globally unique ID of the Schema, for example,
+                      100003. If the same schema is registered under a different subject,
+                      the same identifier is returned; however, the version of the
+                      schema may differ under different subjects. It should be used
+                      to retrieve this schema from the schemas resource and is different
+                      from the schema's version, which is associated with the subject.
+                    type: number
+                  schemaReference:
+                    description: The list of references to other Schemas (see Schema
+                      References for more details).
+                    items:
+                      properties:
+                        name:
+                          description: The name of the reference (for example, "io.confluent.kafka.example.User").
+                            For Avro, the reference name is the fully qualified schema
+                            name; for JSON Schema it is a URL; and for Protobuf, it
+                            is the name of another Protobuf file.
+                          type: string
+                        subjectName:
+                          description: The name of the referenced Schema Registry
+                            Subject, representing the subject under which the referenced
+                            schema is registered (for example, "User").
+                          type: string
+                        version:
+                          description: The exact version of the referenced schema
+                            under the registered subject.
+                          type: number
+                      type: object
+                    type: array
+                  schemaRegistryCluster:
+                    description: The Schema Registry cluster configuration block.
+                    items:
+                      properties:
+                        id:
+                          description: The ID of the Schema Registry cluster, for
+                            example, `lsrc-abc123`.
+                          type: string
+                      type: object
+                    type: array
+                  subjectName:
+                    description: The name of the Schema Registry Subject.
+                    type: string
+                  version:
+                    description: The version number of the schema under the registered
+                      subject.
+                    type: number
+                type: object
+              conditions:
+                description: Conditions of the resource.
+                items:
+                  description: A Condition that may apply to a resource.
+                  properties:
+                    lastTransitionTime:
+                      description: LastTransitionTime is the last time this condition
+                        transitioned from one status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: A Message containing details about this condition's
+                        last transition from one status to another, if any.
+                      type: string
+                    reason:
+                      description: A Reason for this condition's last transition from
+                        one status to another.
+                      type: string
+                    status:
+                      description: Status of this condition; is it currently True,
+                        False, or Unknown?
+                      type: string
+                    type:
+                      description: Type of this condition. At most one of each condition
+                        type may apply to a resource at any point in time.
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - type
+                x-kubernetes-list-type: map
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
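
For reviewers: below is a minimal, untested sketch of a Schema managed resource that this new CRD admits, together with the Kubernetes Secret its credentials block references. All names (example-avro-schema, sr-credentials, the crossplane-system namespace) and the endpoint/cluster values are illustrative placeholders, not values taken from this change:

apiVersion: v1
kind: Secret
metadata:
  name: sr-credentials
  namespace: crossplane-system
type: Opaque
stringData:
  apiKey: <schema-registry-api-key>
  apiSecret: <schema-registry-api-secret>
---
apiVersion: confluent.crossplane.io/v1alpha1
kind: Schema
metadata:
  # Schema is cluster-scoped (scope: Cluster above), so no namespace here.
  name: example-avro-schema
spec:
  forProvider:
    # format and subjectName are enforced by the CEL rules in x-kubernetes-validations.
    format: AVRO
    subjectName: orders-value
    schema: |
      {"type": "record", "name": "Order",
       "fields": [{"name": "id", "type": "string"}]}
    restEndpoint: https://psrc-00000.us-central1.gcp.confluent.cloud:443
    schemaRegistryCluster:
      - id: lsrc-abc123
    credentials:
      # keySecretRef/secretSecretRef each require key, name, and namespace.
      - keySecretRef:
          name: sr-credentials
          namespace: crossplane-system
          key: apiKey
        secretSecretRef:
          name: sr-credentials
          namespace: crossplane-system
          key: apiSecret
    hardDelete: false
  providerConfigRef:
    name: default

If the ProviderConfig is named anything other than default, providerConfigRef.name must point at it. Once the resource reports Ready and Synced in the printer columns, status.atProvider.schemaIdentifier and status.atProvider.version expose the registered schema's globally unique ID and its version under the subject.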