diff --git a/.github/workflows/build-and-test.yaml b/.github/workflows/build-and-test.yaml index 92ad5ceb..626f3788 100644 --- a/.github/workflows/build-and-test.yaml +++ b/.github/workflows/build-and-test.yaml @@ -10,7 +10,7 @@ on: env: product_name: db-operator - go_version: "1.17" + go_version: "1.18" go_os: linux go_arch: amd64 main_go_path: . @@ -20,10 +20,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Check Code Style - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: version: v1.49.0 ## https://github.com/golangci/golangci-lint/releases args: --timeout 3m0s @@ -33,23 +33,23 @@ jobs: needs: lint steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v2 - name: Login to GitHub Container Registry - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: ${{ env.go_version }} diff --git a/.github/workflows/image-publish.yaml b/.github/workflows/image-publish.yaml index 2268c49d..66fe549f 100644 --- a/.github/workflows/image-publish.yaml +++ b/.github/workflows/image-publish.yaml @@ -24,10 +24,10 @@ jobs: docker_arch: "arm64/v8" steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: ${{ env.go_version }} @@ -43,14 +43,14 @@ jobs: uses: docker/setup-buildx-action@v2 - name: Login to GitHub Container Registry - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Login to Dockerhub - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/Dockerfile b/Dockerfile index fafd3d9f..084cbe5b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.4-alpine3.15 as builder +FROM golang:1.18.10-alpine3.17 as builder RUN apk update && apk upgrade && \ apk add --no-cache bash build-base diff --git a/Makefile b/Makefile index 265febc0..32e49e43 100644 --- a/Makefile +++ b/Makefile @@ -74,7 +74,7 @@ generate: controller-gen ## generate supporting code for custom resource types CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0) + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.3) # go-get-tool will 'go get' any package $2 and install it to $1. PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) diff --git a/PROJECT b/PROJECT index 287bda4c..3370a41f 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,7 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html domain: kci.rocks layout: - go.kubebuilder.io/v3 @@ -19,4 +23,27 @@ resources: kind: Database path: github.com/kloeckner-i/db-operator/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: kci.rocks + kind: Database + path: github.com/kloeckner-i/db-operator/api/v1beta1 + version: v1beta1 + webhooks: + conversion: true + defaulting: true + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + domain: kci.rocks + kind: DbInstance + path: github.com/kloeckner-i/db-operator/api/v1beta1 + version: v1beta1 + webhooks: + conversion: true + defaulting: true + validation: true + webhookVersion: v1 version: "3" diff --git a/README.md b/README.md index 69638000..78312b46 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,10 @@ DB Operator provides following features: The repository contains helm charts for db-operator is moved to https://github.com/kloeckner-i/charts New chart after db-operator > 1.2.7, db-instances > 1.3.0 will be only available in new repository. +## CRD Versions are upgraded! +Now both `DbInstance` and `Database` resources are upgraded to `v1beta1`. +In case you were using `connectionStringTemplate`, make sure you have migrated to `secretsTemplates` before upgrading, because `connectionStringTemplate` is removed in this version. Everything else should go seamlessly. We've added `Webhooks` that will take care of resources with an old API version and convert them to the newer one. + ### Downloading old charts Installing older version of charts is still possible. diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 70cb109e..b42827f0 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -19,6 +19,9 @@ package v1alpha1 import ( "errors" + "github.com/kloeckner-i/db-operator/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -150,3 +153,40 @@ func (db *Database) IsMonitoringEnabled() (bool, error) { func (db *Database) InstanceAccessSecretName() string { return "dbin-" + db.Spec.Instance + "-access-secret" } + +// ConvertTo converts this v1alpha1 to v1beta1. (upgrade) +func (db *Database) ConvertTo(dstRaw conversion.Hub) error { + + dst := dstRaw.(*v1beta1.Database) + dst.ObjectMeta = db.ObjectMeta + + dst.Spec.Backup = v1beta1.DatabaseBackup(db.Spec.Backup) + dst.Spec.Cleanup = db.Spec.Cleanup + dst.Spec.DeletionProtected = db.Spec.DeletionProtected + dst.Spec.Instance = db.Spec.Instance + dst.Spec.Postgres.DropPublicSchema = db.Spec.Postgres.DropPublicSchema + dst.Spec.Postgres.Extensions = db.Spec.Extensions + dst.Spec.Postgres.Schemas = db.Spec.Postgres.Schemas + dst.Spec.SecretName = db.Spec.SecretName + dst.Spec.SecretsTemplates = db.Spec.SecretsTemplates + + return nil +} + +// ConvertFrom converts from the Hub version (v1beta1) to (v1alpha1).
(downgrade) +func (dst *Database) ConvertFrom(srcRaw conversion.Hub) error { + db := srcRaw.(*v1beta1.Database) + dst.ObjectMeta = db.ObjectMeta + + dst.Spec.Backup = DatabaseBackup(db.Spec.Backup) + dst.Spec.Cleanup = db.Spec.Cleanup + dst.Spec.DeletionProtected = db.Spec.DeletionProtected + dst.Spec.Instance = db.Spec.Instance + dst.Spec.Postgres.DropPublicSchema = db.Spec.Postgres.DropPublicSchema + dst.Spec.Extensions = db.Spec.Postgres.Extensions + dst.Spec.Postgres.Schemas = db.Spec.Postgres.Schemas + dst.Spec.SecretName = db.Spec.SecretName + dst.Spec.SecretsTemplates = db.Spec.SecretsTemplates + + return nil +} diff --git a/api/v1alpha1/dbinstance_types.go b/api/v1alpha1/dbinstance_types.go index b8405e59..68c3effe 100644 --- a/api/v1alpha1/dbinstance_types.go +++ b/api/v1alpha1/dbinstance_types.go @@ -19,7 +19,9 @@ package v1alpha1 import ( "errors" + "github.com/kloeckner-i/db-operator/api/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -188,6 +190,47 @@ func (dbin *DbInstance) IsMonitoringEnabled() bool { if dbin.Spec.Monitoring.Enabled == false { return false } - return true } + +// ConvertTo converts this v1alpha1 to v1beta1. (upgrade) +func (dbin *DbInstance) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v1beta1.DbInstance) + dst.ObjectMeta = dbin.ObjectMeta + dst.Spec.AdminUserSecret = v1beta1.NamespacedName(dbin.Spec.AdminUserSecret) + dst.Spec.Backup = v1beta1.DbInstanceBackup(dbin.Spec.Backup) + if dbin.Spec.DbInstanceSource.Generic != nil { + dst.Spec.DbInstanceSource.Generic = (*v1beta1.GenericInstance)(dbin.Spec.DbInstanceSource.Generic) + } else if dbin.Spec.DbInstanceSource.Google != nil { + dst.Spec.DbInstanceSource.Google = &v1beta1.GoogleInstance{} + dst.Spec.DbInstanceSource.Google.APIEndpoint = dbin.Spec.DbInstanceSource.Google.APIEndpoint + dst.Spec.DbInstanceSource.Google.InstanceName = dbin.Spec.DbInstanceSource.Google.InstanceName + dst.Spec.DbInstanceSource.Google.ClientSecret = v1beta1.NamespacedName(dbin.Spec.DbInstanceSource.Google.ClientSecret) + dst.Spec.DbInstanceSource.Google.ConfigmapName = v1beta1.NamespacedName(dbin.Spec.Google.ConfigmapName) + } + dst.Spec.Engine = dbin.Spec.Engine + dst.Spec.Monitoring = v1beta1.DbInstanceMonitoring(dbin.Spec.Monitoring) + dst.Spec.SSLConnection = v1beta1.DbInstanceSSLConnection(dbin.Spec.SSLConnection) + return nil +} + +// ConvertFrom converts from the Hub version (v1beta1) to (v1alpha1).
(downgrade) +func (dst *DbInstance) ConvertFrom(srcRaw conversion.Hub) error { + dbin := srcRaw.(*v1beta1.DbInstance) + dst.ObjectMeta = dbin.ObjectMeta + dst.Spec.AdminUserSecret = NamespacedName(dbin.Spec.AdminUserSecret) + dst.Spec.Backup = DbInstanceBackup(dbin.Spec.Backup) + if dbin.Spec.DbInstanceSource.Generic != nil { + dst.Spec.DbInstanceSource.Generic = (*GenericInstance)(dbin.Spec.DbInstanceSource.Generic) + } else if dbin.Spec.DbInstanceSource.Google != nil { + dst.Spec.DbInstanceSource.Google = &GoogleInstance{} + dst.Spec.DbInstanceSource.Google.APIEndpoint = dbin.Spec.DbInstanceSource.Google.APIEndpoint + dst.Spec.DbInstanceSource.Google.InstanceName = dbin.Spec.DbInstanceSource.Google.InstanceName + dst.Spec.DbInstanceSource.Google.ClientSecret = NamespacedName(dbin.Spec.DbInstanceSource.Google.ClientSecret) + dst.Spec.DbInstanceSource.Google.ConfigmapName = NamespacedName(dbin.Spec.Google.ConfigmapName) + } + dst.Spec.Engine = dbin.Spec.Engine + dst.Spec.Monitoring = DbInstanceMonitoring(dbin.Spec.Monitoring) + dst.Spec.SSLConnection = DbInstanceSSLConnection(dbin.Spec.SSLConnection) + return nil +} diff --git a/api/v1beta1/database_types.go b/api/v1beta1/database_types.go new file mode 100644 index 00000000..81705c18 --- /dev/null +++ b/api/v1beta1/database_types.go @@ -0,0 +1,149 @@ +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// DatabaseSpec defines the desired state of Database +type DatabaseSpec struct { + SecretName string `json:"secretName"` + Instance string `json:"instance"` + DeletionProtected bool `json:"deletionProtected"` + Backup DatabaseBackup `json:"backup"` + SecretsTemplates map[string]string `json:"secretsTemplates,omitempty"` + Postgres Postgres `json:"postgres,omitempty"` + Cleanup bool `json:"cleanup,omitempty"` +} + +// Postgres struct should be used to provide resources that are only applicable to postgres +type Postgres struct { + Extensions []string `json:"extensions,omitempty"` + // If set to true, the public schema will be dropped after the database creation + DropPublicSchema bool `json:"dropPublicSchema,omitempty"` + // Specify schemas to be created. The user created by db-operator will have all access on them.
+ Schemas []string `json:"schemas,omitempty"` +} + +// DatabaseStatus defines the observed state of Database +type DatabaseStatus struct { + // Important: Run "make generate" to regenerate code after modifying this file + // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html + Phase string `json:"phase"` + Status bool `json:"status"` + InstanceRef *DbInstance `json:"instanceRef"` + MonitorUserSecretName string `json:"monitorUserSecret,omitempty"` + ProxyStatus DatabaseProxyStatus `json:"proxyStatus,omitempty"` + DatabaseName string `json:"database"` + UserName string `json:"user"` +} + +// DatabaseProxyStatus defines whether proxy for database is enabled or not +// if so, provide information +type DatabaseProxyStatus struct { + Status bool `json:"status"` + ServiceName string `json:"serviceName"` + SQLPort int32 `json:"sqlPort"` +} + +// DatabaseBackup defines the desired state of backup and schedule +type DatabaseBackup struct { + Enable bool `json:"enable"` + Cron string `json:"cron"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=db +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`,description="current db phase" +// +kubebuilder:printcolumn:name="Status",type=boolean,JSONPath=`.status.status`,description="current db status" +// +kubebuilder:printcolumn:name="Protected",type=boolean,JSONPath=`.spec.deletionProtected`,description="If database is protected to not get deleted." +// +kubebuilder:printcolumn:name="DBInstance",type=string,JSONPath=`.spec.instance`,description="instance reference" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="time since creation of resource" +// +kubebuilder:storageversion +// Database is the Schema for the databases API +type Database struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DatabaseSpec `json:"spec,omitempty"` + Status DatabaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DatabaseList contains a list of Database +type DatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Database `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Database{}, &DatabaseList{}) +} + +// GetInstanceRef returns DbInstance pointer which used by Database +func (db *Database) GetInstanceRef() (*DbInstance, error) { + if db.Status.InstanceRef == nil { + return nil, errors.New("can not find instance ref") + } + return db.Status.InstanceRef, nil +} + +// GetEngineType returns type of database engine ex) postgres or mysql +func (db *Database) GetEngineType() (string, error) { + instance, err := db.GetInstanceRef() + if err != nil { + return "", err + } + + return instance.Spec.Engine, nil +} + +// GetBackendType returns type of instance infrastructure. +// Infrastructure where database is running ex) google cloud sql, generic instance +func (db *Database) GetBackendType() (string, error) { + instance, err := db.GetInstanceRef() + if err != nil { + return "", err + } + + return instance.GetBackendType() +} + +// IsMonitoringEnabled returns true if monitoring is enabled in DbInstance spec. 
+func (db *Database) IsMonitoringEnabled() (bool, error) { + instance, err := db.GetInstanceRef() + if err != nil { + return false, err + } + + return instance.IsMonitoringEnabled(), nil +} + +// AccessSecretName returns string value to define name of the secret resource for accessing instance +func (db *Database) InstanceAccessSecretName() string { + return "dbin-" + db.Spec.Instance + "-access-secret" +} + +func (db *Database) Hub() {} diff --git a/api/v1beta1/database_webhook.go b/api/v1beta1/database_webhook.go new file mode 100644 index 00000000..715086a2 --- /dev/null +++ b/api/v1beta1/database_webhook.go @@ -0,0 +1,75 @@ +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var databaselog = logf.Log.WithName("database-resource") + +func (r *Database) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-kci-rocks-v1beta1-database,mutating=true,failurePolicy=fail,sideEffects=None,groups=kci.rocks,resources=databases,verbs=create;update,versions=v1beta1,name=mdatabase.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &Database{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *Database) Default() { + databaselog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-kci-rocks-v1beta1-database,mutating=false,failurePolicy=fail,sideEffects=None,groups=kci.rocks,resources=databases,verbs=create;update,versions=v1beta1,name=vdatabase.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &Database{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *Database) ValidateCreate() error { + databaselog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. + return nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *Database) ValidateUpdate(old runtime.Object) error { + databaselog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. + return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *Database) ValidateDelete() error { + databaselog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil +} diff --git a/api/v1beta1/dbinstance_types.go b/api/v1beta1/dbinstance_types.go new file mode 100644 index 00000000..7f0e88f3 --- /dev/null +++ b/api/v1beta1/dbinstance_types.go @@ -0,0 +1,192 @@ +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// DbInstanceSpec defines the desired state of DbInstance +type DbInstanceSpec struct { + // Important: Run "make generate" to regenerate code after modifying this file + Engine string `json:"engine"` + AdminUserSecret NamespacedName `json:"adminSecretRef"` + Backup DbInstanceBackup `json:"backup,omitempty"` + Monitoring DbInstanceMonitoring `json:"monitoring,omitempty"` + SSLConnection DbInstanceSSLConnection `json:"sslConnection,omitempty"` + DbInstanceSource `json:",inline"` +} + +// DbInstanceSource represents the source of an instance. +// Only one of its members may be specified. +type DbInstanceSource struct { + Google *GoogleInstance `json:"google,omitempty" protobuf:"bytes,1,opt,name=google"` + Generic *GenericInstance `json:"generic,omitempty" protobuf:"bytes,2,opt,name=generic"` +} + +// DbInstanceStatus defines the observed state of DbInstance +type DbInstanceStatus struct { + // Important: Run "make generate" to regenerate code after modifying this file + Phase string `json:"phase"` + Status bool `json:"status"` + Info map[string]string `json:"info,omitempty"` + Checksums map[string]string `json:"checksums,omitempty"` +} + +// GoogleInstance is used when instance type is Google Cloud SQL +// and describes necessary information to use google API to create sql instances +type GoogleInstance struct { + InstanceName string `json:"instance"` + ConfigmapName NamespacedName `json:"configmapRef"` + APIEndpoint string `json:"apiEndpoint,omitempty"` + ClientSecret NamespacedName `json:"clientSecretRef,omitempty"` +} + +// BackendServer defines backend database server +type BackendServer struct { + Host string `json:"host"` + Port uint16 `json:"port"` + MaxConnection uint16 `json:"maxConn"` + ReadOnly bool `json:"readonly,omitempty"` +} + +// GenericInstance is used when instance type is generic +// and describes necessary information to use instance +// generic instance can be any backend, it must be reachable by described address and port +type GenericInstance struct { + Host string `json:"host"` + Port uint16 `json:"port"` + PublicIP string `json:"publicIp,omitempty"` + // BackupHost address will be used for dumping database for backup + // Usually secondary address for primary-secondary setup or cluster lb address + // If it's not defined, above Host will be used as backup host address.
+ BackupHost string `json:"backupHost,omitempty"` +} + +// DbInstanceBackup defines name of google bucket to use for storing database dumps for backup when backup is enabled +type DbInstanceBackup struct { + Bucket string `json:"bucket"` +} + +// DbInstanceMonitoring defines if exporter should be enabled +type DbInstanceMonitoring struct { + Enabled bool `json:"enabled"` +} + +// DbInstanceSSLConnection defines whether connection from db-operator to instance has to be ssl or not +type DbInstanceSSLConnection struct { + Enabled bool `json:"enabled"` + // SkipVerify uses SSL connection, but doesn't check against a CA + SkipVerify bool `json:"skip-verify"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:scope=Cluster,shortName=dbin +//+kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`,description="current phase" +//+kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.status`,description="health status" +// +kubebuilder:storageversion + +// DbInstance is the Schema for the dbinstances API +type DbInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DbInstanceSpec `json:"spec,omitempty"` + Status DbInstanceStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DbInstanceList contains a list of DbInstance +type DbInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DbInstance `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DbInstance{}, &DbInstanceList{}) +} + +// ValidateEngine checks if defined engine by DbInstance object is supported by db-operator +func (dbin *DbInstance) ValidateEngine() error { + if (dbin.Spec.Engine == "mysql") || (dbin.Spec.Engine == "postgres") { + return nil + } + + return errors.New("not supported engine type") +} + +// ValidateBackend checks if backend type of instance is defined properly +// returns error when more than one backend types are defined +// or when no backend type is defined +func (dbin *DbInstance) ValidateBackend() error { + source := dbin.Spec.DbInstanceSource + + if (source.Google == nil) && (source.Generic == nil) { + return errors.New("no instance type defined") + } + + numSources := 0 + + if source.Google != nil { + numSources++ + } + + if source.Generic != nil { + numSources++ + } + + if numSources > 1 { + return errors.New("may not specify more than 1 instance type") + } + + return nil +} + +// GetBackendType returns type of instance infrastructure.
+// Infrastructure where database is running ex) google cloud sql, generic instance +func (dbin *DbInstance) GetBackendType() (string, error) { + err := dbin.ValidateBackend() + if err != nil { + return "", err + } + + source := dbin.Spec.DbInstanceSource + + if source.Google != nil { + return "google", nil + } + + if source.Generic != nil { + return "generic", nil + } + + return "", errors.New("no backend type defined") +} + +// IsMonitoringEnabled returns boolean value if monitoring is enabled for the instance +func (dbin *DbInstance) IsMonitoringEnabled() bool { + return dbin.Spec.Monitoring.Enabled +} + +func (db *DbInstance) Hub() {} diff --git a/api/v1beta1/dbinstance_webhook.go b/api/v1beta1/dbinstance_webhook.go new file mode 100644 index 00000000..13e1d65e --- /dev/null +++ b/api/v1beta1/dbinstance_webhook.go @@ -0,0 +1,75 @@ +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var dbinstancelog = logf.Log.WithName("dbinstance-resource") + +func (r *DbInstance) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-kci-rocks-v1beta1-dbinstance,mutating=true,failurePolicy=fail,sideEffects=None,groups=kci.rocks,resources=dbinstances,verbs=create;update,versions=v1beta1,name=mdbinstance.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DbInstance{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DbInstance) Default() { + dbinstancelog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-kci-rocks-v1beta1-dbinstance,mutating=false,failurePolicy=fail,sideEffects=None,groups=kci.rocks,resources=dbinstances,verbs=create;update,versions=v1beta1,name=vdbinstance.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &DbInstance{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DbInstance) ValidateCreate() error { + dbinstancelog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. + return nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DbInstance) ValidateUpdate(old runtime.Object) error { + dbinstancelog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. 
+ return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DbInstance) ValidateDelete() error { + dbinstancelog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil +} diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go new file mode 100644 index 00000000..072a997d --- /dev/null +++ b/api/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package v1beta1 contains API Schema definitions for the v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=kci.rocks +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "kci.rocks", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go new file mode 100644 index 00000000..302f911b --- /dev/null +++ b/api/v1beta1/types.go @@ -0,0 +1,38 @@ +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/types" + +// NamespacedName is a fork of the kubernetes api type of the same name. +// Sadly this is required because CRD structs must have all fields json tagged and the kubernetes type is not tagged. +type NamespacedName struct { + Namespace string `json:"Namespace"` + Name string `json:"Name"` +} + +// ToKubernetesType converts our local type to the kubernetes API equivalent. 
+func (nn *NamespacedName) ToKubernetesType() types.NamespacedName { + if nn == nil { + return types.NamespacedName{} + } + + return types.NamespacedName{ + Name: nn.Name, + Namespace: nn.Namespace, + } +} diff --git a/api/v1beta1/webhook_suite_test.go b/api/v1beta1/webhook_suite_test.go new file mode 100644 index 00000000..15a3c995 --- /dev/null +++ b/api/v1beta1/webhook_suite_test.go @@ -0,0 +1,135 @@ +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1beta1 + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + //+kubebuilder:scaffold:imports + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + }, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + scheme := runtime.NewScheme() + err = AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + err = admissionv1beta1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // start webhook server using Manager + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + LeaderElection: false, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred()) + + err = (&Database{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + err = (&DbInstance{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:webhook + + go func() { + defer GinkgoRecover() + err = mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + conn.Close() + return nil + }).Should(Succeed()) + +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..0e95a4b3 --- /dev/null +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,425 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2021 kloeckner.i GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendServer) DeepCopyInto(out *BackendServer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendServer. +func (in *BackendServer) DeepCopy() *BackendServer { + if in == nil { + return nil + } + out := new(BackendServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Database) DeepCopyInto(out *Database) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. +func (in *Database) DeepCopy() *Database { + if in == nil { + return nil + } + out := new(Database) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Database) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseBackup) DeepCopyInto(out *DatabaseBackup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseBackup. +func (in *DatabaseBackup) DeepCopy() *DatabaseBackup { + if in == nil { + return nil + } + out := new(DatabaseBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseList) DeepCopyInto(out *DatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Database, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList. +func (in *DatabaseList) DeepCopy() *DatabaseList { + if in == nil { + return nil + } + out := new(DatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseProxyStatus) DeepCopyInto(out *DatabaseProxyStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseProxyStatus. +func (in *DatabaseProxyStatus) DeepCopy() *DatabaseProxyStatus { + if in == nil { + return nil + } + out := new(DatabaseProxyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) { + *out = *in + out.Backup = in.Backup + if in.SecretsTemplates != nil { + in, out := &in.SecretsTemplates, &out.SecretsTemplates + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Postgres.DeepCopyInto(&out.Postgres) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec. +func (in *DatabaseSpec) DeepCopy() *DatabaseSpec { + if in == nil { + return nil + } + out := new(DatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { + *out = *in + if in.InstanceRef != nil { + in, out := &in.InstanceRef, &out.InstanceRef + *out = new(DbInstance) + (*in).DeepCopyInto(*out) + } + out.ProxyStatus = in.ProxyStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. +func (in *DatabaseStatus) DeepCopy() *DatabaseStatus { + if in == nil { + return nil + } + out := new(DatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbInstance) DeepCopyInto(out *DbInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstance. +func (in *DbInstance) DeepCopy() *DbInstance { + if in == nil { + return nil + } + out := new(DbInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DbInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbInstanceBackup) DeepCopyInto(out *DbInstanceBackup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstanceBackup. +func (in *DbInstanceBackup) DeepCopy() *DbInstanceBackup { + if in == nil { + return nil + } + out := new(DbInstanceBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbInstanceList) DeepCopyInto(out *DbInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DbInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstanceList. +func (in *DbInstanceList) DeepCopy() *DbInstanceList { + if in == nil { + return nil + } + out := new(DbInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DbInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbInstanceMonitoring) DeepCopyInto(out *DbInstanceMonitoring) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstanceMonitoring. +func (in *DbInstanceMonitoring) DeepCopy() *DbInstanceMonitoring { + if in == nil { + return nil + } + out := new(DbInstanceMonitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DbInstanceSSLConnection) DeepCopyInto(out *DbInstanceSSLConnection) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstanceSSLConnection. +func (in *DbInstanceSSLConnection) DeepCopy() *DbInstanceSSLConnection { + if in == nil { + return nil + } + out := new(DbInstanceSSLConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbInstanceSource) DeepCopyInto(out *DbInstanceSource) { + *out = *in + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(GoogleInstance) + **out = **in + } + if in.Generic != nil { + in, out := &in.Generic, &out.Generic + *out = new(GenericInstance) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstanceSource. +func (in *DbInstanceSource) DeepCopy() *DbInstanceSource { + if in == nil { + return nil + } + out := new(DbInstanceSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbInstanceSpec) DeepCopyInto(out *DbInstanceSpec) { + *out = *in + out.AdminUserSecret = in.AdminUserSecret + out.Backup = in.Backup + out.Monitoring = in.Monitoring + out.SSLConnection = in.SSLConnection + in.DbInstanceSource.DeepCopyInto(&out.DbInstanceSource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstanceSpec. +func (in *DbInstanceSpec) DeepCopy() *DbInstanceSpec { + if in == nil { + return nil + } + out := new(DbInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbInstanceStatus) DeepCopyInto(out *DbInstanceStatus) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Checksums != nil { + in, out := &in.Checksums, &out.Checksums + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbInstanceStatus. +func (in *DbInstanceStatus) DeepCopy() *DbInstanceStatus { + if in == nil { + return nil + } + out := new(DbInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericInstance) DeepCopyInto(out *GenericInstance) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericInstance. +func (in *GenericInstance) DeepCopy() *GenericInstance { + if in == nil { + return nil + } + out := new(GenericInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleInstance) DeepCopyInto(out *GoogleInstance) { + *out = *in + out.ConfigmapName = in.ConfigmapName + out.ClientSecret = in.ClientSecret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleInstance. 
+func (in *GoogleInstance) DeepCopy() *GoogleInstance { + if in == nil { + return nil + } + out := new(GoogleInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespacedName) DeepCopyInto(out *NamespacedName) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedName. +func (in *NamespacedName) DeepCopy() *NamespacedName { + if in == nil { + return nil + } + out := new(NamespacedName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Postgres) DeepCopyInto(out *Postgres) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Schemas != nil { + in, out := &in.Schemas, &out.Schemas + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Postgres. +func (in *Postgres) DeepCopy() *Postgres { + if in == nil { + return nil + } + out := new(Postgres) + in.DeepCopyInto(out) + return out +} diff --git a/charts/README.md b/charts/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 00000000..e72783ae --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,39 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
+apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app.kubernetes.io/name: issuer + app.kubernetes.io/instance: selfsigned-issuer + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: db-operator + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/name: certificate + app.kubernetes.io/instance: serving-cert + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: db-operator + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 00000000..bebea5a5 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 00000000..e631f777 --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames diff --git a/config/crd/bases/kci.rocks_databases.yaml b/config/crd/bases/kci.rocks_databases.yaml index 9c5b14f3..f1987d0a 100644 --- a/config/crd/bases/kci.rocks_databases.yaml +++ b/config/crd/bases/kci.rocks_databases.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.11.3 creationTimestamp: null name: databases.kci.rocks spec: @@ -71,6 +70,8 @@ spec: - cron - enable type: object + cleanup: + type: boolean connectionStringTemplate: description: 'ConnectionStringTemplate field can be used to pass a custom template for generating a db connection string. These keywords @@ -314,12 +315,299 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: current db phase + jsonPath: .status.phase + name: Phase + type: string + - description: current db status + jsonPath: .status.status + name: Status + type: boolean + - description: If database is protected to not get deleted. 
+ jsonPath: .spec.deletionProtected + name: Protected + type: boolean + - description: instance reference + jsonPath: .spec.instance + name: DBInstance + type: string + - description: time since creation of resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseSpec defines the desired state of Database + properties: + backup: + description: DatabaseBackup defines the desired state of backup and + schedule + properties: + cron: + type: string + enable: + type: boolean + required: + - cron + - enable + type: object + cleanup: + type: boolean + deletionProtected: + type: boolean + instance: + type: string + postgres: + description: Postgres struct should be used to provide resource that + only applicable to postgres + properties: + dropPublicSchema: + description: If set to true, the public schema will be dropped + after the database creation + type: boolean + extensions: + items: + type: string + type: array + schemas: + description: Specify schemas to be created. The user created by + db-operator will have all access on them. + items: + type: string + type: array + type: object + secretName: + type: string + secretsTemplates: + additionalProperties: + type: string + type: object + required: + - backup + - deletionProtected + - instance + - secretName + type: object + status: + description: DatabaseStatus defines the observed state of Database + properties: + database: + type: string + instanceRef: + description: DbInstance is the Schema for the dbinstances API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DbInstanceSpec defines the desired state of DbInstance + properties: + adminSecretRef: + description: NamespacedName is a fork of the kubernetes api + type of the same name. Sadly this is required because CRD + structs must have all fields json tagged and the kubernetes + type is not tagged. 
+ properties: + Name: + type: string + Namespace: + type: string + required: + - Name + - Namespace + type: object + backup: + description: DbInstanceBackup defines name of google bucket + to use for storing database dumps for backup when backup + is enabled + properties: + bucket: + type: string + required: + - bucket + type: object + engine: + description: 'Important: Run "make generate" to regenerate + code after modifying this file' + type: string + generic: + description: GenericInstance is used when instance type is + generic and describes necessary informations to use instance + generic instance can be any backend, it must be reachable + by described address and port + properties: + backupHost: + description: BackupHost address will be used for dumping + database for backup Usually secondary address for primary-secondary + setup or cluster lb address If it's not defined, above + Host will be used as backup host address. + type: string + host: + type: string + port: + type: integer + publicIp: + type: string + required: + - host + - port + type: object + google: + description: GoogleInstance is used when instance type is + Google Cloud SQL and describes necessary informations to + use google API to create sql instances + properties: + apiEndpoint: + type: string + clientSecretRef: + description: NamespacedName is a fork of the kubernetes + api type of the same name. Sadly this is required because + CRD structs must have all fields json tagged and the + kubernetes type is not tagged. + properties: + Name: + type: string + Namespace: + type: string + required: + - Name + - Namespace + type: object + configmapRef: + description: NamespacedName is a fork of the kubernetes + api type of the same name. Sadly this is required because + CRD structs must have all fields json tagged and the + kubernetes type is not tagged. 
+ properties: + Name: + type: string + Namespace: + type: string + required: + - Name + - Namespace + type: object + instance: + type: string + required: + - configmapRef + - instance + type: object + monitoring: + description: DbInstanceMonitoring defines if exporter + properties: + enabled: + type: boolean + required: + - enabled + type: object + sslConnection: + description: DbInstanceSSLConnection defines weather connection + from db-operator to instance has to be ssl or not + properties: + enabled: + type: boolean + skip-verify: + description: SkipVerity use SSL connection, but don't + check against a CA + type: boolean + required: + - enabled + - skip-verify + type: object + required: + - adminSecretRef + - engine + type: object + status: + description: DbInstanceStatus defines the observed state of DbInstance + properties: + checksums: + additionalProperties: + type: string + type: object + info: + additionalProperties: + type: string + type: object + phase: + description: 'Important: Run "make generate" to regenerate + code after modifying this file' + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + monitorUserSecret: + type: string + phase: + description: 'Important: Run "make generate" to regenerate code after + modifying this file Add custom validation using kubebuilder tags: + https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' + type: string + proxyStatus: + description: DatabaseProxyStatus defines whether proxy for database + is enabled or not if so, provide information + properties: + serviceName: + type: string + sqlPort: + format: int32 + type: integer + status: + type: boolean + required: + - serviceName + - sqlPort + - status + type: object + status: + type: boolean + user: + type: string + required: + - database + - instanceRef + - phase + - status + - user + type: object + type: object + served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/kci.rocks_dbinstances.yaml b/config/crd/bases/kci.rocks_dbinstances.yaml index 65061e90..e389ae6a 100644 --- a/config/crd/bases/kci.rocks_dbinstances.yaml +++ b/config/crd/bases/kci.rocks_dbinstances.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.11.3 creationTimestamp: null name: dbinstances.kci.rocks spec: @@ -185,12 +184,176 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: current phase + jsonPath: .status.phase + name: Phase + type: string + - description: health status + jsonPath: .status.status + name: Status + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: DbInstance is the Schema for the dbinstances API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DbInstanceSpec defines the desired state of DbInstance + properties: + adminSecretRef: + description: NamespacedName is a fork of the kubernetes api type of + the same name. Sadly this is required because CRD structs must have + all fields json tagged and the kubernetes type is not tagged. + properties: + Name: + type: string + Namespace: + type: string + required: + - Name + - Namespace + type: object + backup: + description: DbInstanceBackup defines name of google bucket to use + for storing database dumps for backup when backup is enabled + properties: + bucket: + type: string + required: + - bucket + type: object + engine: + description: 'Important: Run "make generate" to regenerate code after + modifying this file' + type: string + generic: + description: GenericInstance is used when instance type is generic + and describes necessary informations to use instance generic instance + can be any backend, it must be reachable by described address and + port + properties: + backupHost: + description: BackupHost address will be used for dumping database + for backup Usually secondary address for primary-secondary setup + or cluster lb address If it's not defined, above Host will be + used as backup host address. + type: string + host: + type: string + port: + type: integer + publicIp: + type: string + required: + - host + - port + type: object + google: + description: GoogleInstance is used when instance type is Google Cloud + SQL and describes necessary informations to use google API to create + sql instances + properties: + apiEndpoint: + type: string + clientSecretRef: + description: NamespacedName is a fork of the kubernetes api type + of the same name. Sadly this is required because CRD structs + must have all fields json tagged and the kubernetes type is + not tagged. + properties: + Name: + type: string + Namespace: + type: string + required: + - Name + - Namespace + type: object + configmapRef: + description: NamespacedName is a fork of the kubernetes api type + of the same name. Sadly this is required because CRD structs + must have all fields json tagged and the kubernetes type is + not tagged. 
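For the v1beta1 DbInstance schema, a minimal manifest using the generic source might be shaped like this; the host, port, secret name, and namespace are placeholders, and note the capitalised `Name`/`Namespace` keys the schema requires for `adminSecretRef`:

```YAML
apiVersion: kci.rocks/v1beta1
kind: DbInstance
metadata:
  name: example-instance                       # placeholder name
spec:
  engine: postgres
  adminSecretRef:
    Name: example-instance-admin-secret        # placeholder secret name
    Namespace: db-operator                     # placeholder namespace
  generic:
    host: postgres.database.svc.cluster.local  # placeholder address
    port: 5432
```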
+ properties: + Name: + type: string + Namespace: + type: string + required: + - Name + - Namespace + type: object + instance: + type: string + required: + - configmapRef + - instance + type: object + monitoring: + description: DbInstanceMonitoring defines if exporter + properties: + enabled: + type: boolean + required: + - enabled + type: object + sslConnection: + description: DbInstanceSSLConnection defines weather connection from + db-operator to instance has to be ssl or not + properties: + enabled: + type: boolean + skip-verify: + description: SkipVerity use SSL connection, but don't check against + a CA + type: boolean + required: + - enabled + - skip-verify + type: object + required: + - adminSecretRef + - engine + type: object + status: + description: DbInstanceStatus defines the observed state of DbInstance + properties: + checksums: + additionalProperties: + type: string + type: object + info: + additionalProperties: + type: string + type: object + phase: + description: 'Important: Run "make generate" to regenerate code after + modifying this file' + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 00000000..da8091c8 --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,24 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/kci.rocks_dbinstances.yaml +- bases/kci.rocks_databases.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +- patches/webhook_in_dbinstances.yaml +- patches/webhook_in_databases.yaml +#+kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +- patches/cainjection_in_dbinstances.yaml +- patches/cainjection_in_databases.yaml +#+kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. 
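As a rough sketch of what the webhook patches referenced above produce, each CRD ends up with a conversion stanza along these lines once rendered (the `strategy: Webhook` line is presumed from the unchanged part of the patch; service name and namespace are rewritten by the kustomize config at render time):

```YAML
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions:
        - v1
      clientConfig:
        service:
          name: webhook-service   # rewritten via the nameReference config
          namespace: system       # replaced with the operator namespace at render time
          path: /convert
```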
+configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/webhook_in_databases.yaml b/config/crd/patches/webhook_in_databases.yaml index ccf9d688..67319c50 100644 --- a/config/crd/patches/webhook_in_databases.yaml +++ b/config/crd/patches/webhook_in_databases.yaml @@ -12,3 +12,5 @@ spec: namespace: system name: webhook-service path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_dbinstances.yaml b/config/crd/patches/webhook_in_dbinstances.yaml index 78835c94..79eb7953 100644 --- a/config/crd/patches/webhook_in_dbinstances.yaml +++ b/config/crd/patches/webhook_in_dbinstances.yaml @@ -12,3 +12,5 @@ spec: namespace: system name: webhook-service path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000..738de350 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 00000000..a387e0dc --- /dev/null +++ b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,29 @@ +# This patch add annotation to admission webhook config and +# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 
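The `$(CERTIFICATE_NAMESPACE)`/`$(CERTIFICATE_NAME)` variables are resolved by kustomize, so the rendered annotation points at a cert-manager Certificate and the CA injector fills in the webhook's caBundle from it. A sketch with hypothetical values:

```YAML
metadata:
  annotations:
    cert-manager.io/inject-ca-from: db-operator-system/serving-cert   # hypothetical namespace/Certificate name
```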
+apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: mutatingwebhookconfiguration + app.kubernetes.io/instance: mutating-webhook-configuration + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: db-operator + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: validatingwebhookconfiguration + app.kubernetes.io/instance: validating-webhook-configuration + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: db-operator + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/config/rbac/dbinstance_editor_role.yaml b/config/rbac/dbinstance_editor_role.yaml new file mode 100644 index 00000000..22525615 --- /dev/null +++ b/config/rbac/dbinstance_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit dbinstances. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dbinstance-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: db-operator + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + name: dbinstance-editor-role +rules: +- apiGroups: + - kci.rocks + resources: + - dbinstances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - kci.rocks + resources: + - dbinstances/status + verbs: + - get diff --git a/config/rbac/dbinstance_viewer_role.yaml b/config/rbac/dbinstance_viewer_role.yaml new file mode 100644 index 00000000..95a0b6a8 --- /dev/null +++ b/config/rbac/dbinstance_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view dbinstances. 
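Since DbInstance is cluster-scoped, granting the editor role above to users is typically done with a ClusterRoleBinding; a minimal sketch with a hypothetical group:

```YAML
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dbinstance-editors              # hypothetical binding name
subjects:
  - kind: Group
    name: db-admins                     # hypothetical group
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: dbinstance-editor-role
  apiGroup: rbac.authorization.k8s.io
```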
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dbinstance-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: db-operator + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + name: dbinstance-viewer-role +rules: +- apiGroups: + - kci.rocks + resources: + - dbinstances + verbs: + - get + - list + - watch +- apiGroups: + - kci.rocks + resources: + - dbinstances/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7cd02fac..ad6b5158 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,4 +1,3 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/config/samples/_v1beta1_dbinstance.yaml b/config/samples/_v1beta1_dbinstance.yaml new file mode 100644 index 00000000..09aaf804 --- /dev/null +++ b/config/samples/_v1beta1_dbinstance.yaml @@ -0,0 +1,12 @@ +apiVersion: kci.rocks/v1beta1 +kind: DbInstance +metadata: + labels: + app.kubernetes.io/name: dbinstance + app.kubernetes.io/instance: dbinstance-sample + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: db-operator + name: dbinstance-sample +spec: + # TODO(user): Add fields here diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 00000000..9cf26134 --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 00000000..25e21e3c --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. 
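In practice this means that when kustomize renames the webhook Service (for example via a namePrefix) or moves it into the operator namespace, the `clientConfig` of the generated webhook configurations is updated to match. A sketch of one rendered entry, assuming a `db-operator-` prefix and a `db-operator-system` namespace (both assumptions, not values from the repository):

```YAML
webhooks:
  - name: vdatabase.kb.io
    clientConfig:
      service:
        name: db-operator-webhook-service   # prefixed Service name substituted via nameReference
        namespace: db-operator-system       # namespace filled in via the namespace fieldSpec
        path: /validate-kci-rocks-v1beta1-database
```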
+nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 00000000..4537042f --- /dev/null +++ b/config/webhook/manifests.yaml @@ -0,0 +1,94 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-kci-rocks-v1beta1-database + failurePolicy: Fail + name: mdatabase.kb.io + rules: + - apiGroups: + - kci.rocks + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-kci-rocks-v1beta1-dbinstance + failurePolicy: Fail + name: mdbinstance.kb.io + rules: + - apiGroups: + - kci.rocks + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - dbinstances + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-kci-rocks-v1beta1-database + failurePolicy: Fail + name: vdatabase.kb.io + rules: + - apiGroups: + - kci.rocks + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-kci-rocks-v1beta1-dbinstance + failurePolicy: Fail + name: vdbinstance.kb.io + rules: + - apiGroups: + - kci.rocks + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - dbinstances + sideEffects: None diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 00000000..d5d2b4ae --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,20 @@ + +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: service + app.kubernetes.io/instance: webhook-service + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: db-operator + app.kubernetes.io/part-of: db-operator + app.kubernetes.io/managed-by: kustomize + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/controllers/backup/cronjob.go b/controllers/backup/cronjob.go index 74a7e324..73d68e7a 100644 --- a/controllers/backup/cronjob.go +++ b/controllers/backup/cronjob.go @@ -20,7 +20,7 @@ import ( "errors" "fmt" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 
"github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/config" "github.com/kloeckner-i/db-operator/pkg/utils/kci" "github.com/sirupsen/logrus" @@ -34,7 +34,7 @@ import ( // GCSBackupCron builds kubernetes cronjob object // to create database backup regularly with defined schedule from dbcr // this job will database dump and upload to google bucket storage for backup -func GCSBackupCron(conf *config.Config, dbcr *kciv1alpha1.Database, ownership []metav1.OwnerReference) (*batchv1beta1.CronJob, error) { +func GCSBackupCron(conf *config.Config, dbcr *kciv1beta1.Database, ownership []metav1.OwnerReference) (*batchv1beta1.CronJob, error) { cronJobSpec, err := buildCronJobSpec(conf, dbcr) if err != nil { return nil, err @@ -55,7 +55,7 @@ func GCSBackupCron(conf *config.Config, dbcr *kciv1alpha1.Database, ownership [] }, nil } -func buildCronJobSpec(conf *config.Config, dbcr *kciv1alpha1.Database) (batchv1beta1.CronJobSpec, error) { +func buildCronJobSpec(conf *config.Config, dbcr *kciv1beta1.Database) (batchv1beta1.CronJobSpec, error) { jobTemplate, err := buildJobTemplate(conf, dbcr) if err != nil { return batchv1beta1.CronJobSpec{}, err @@ -67,7 +67,7 @@ func buildCronJobSpec(conf *config.Config, dbcr *kciv1alpha1.Database) (batchv1b }, nil } -func buildJobTemplate(conf *config.Config, dbcr *kciv1alpha1.Database) (batchv1beta1.JobTemplateSpec, error) { +func buildJobTemplate(conf *config.Config, dbcr *kciv1beta1.Database) (batchv1beta1.JobTemplateSpec, error) { ActiveDeadlineSeconds := int64(conf.Backup.ActiveDeadlineSeconds) BackoffLimit := int32(3) instance, err := dbcr.GetInstanceRef() @@ -148,7 +148,7 @@ func getResourceRequirements(conf *config.Config) v1.ResourceRequirements { return resourceRequirements } -func postgresBackupContainer(conf *config.Config, dbcr *kciv1alpha1.Database) (v1.Container, error) { +func postgresBackupContainer(conf *config.Config, dbcr *kciv1beta1.Database) (v1.Container, error) { env, err := postgresEnvVars(conf, dbcr) if err != nil { return v1.Container{}, err @@ -164,7 +164,7 @@ func postgresBackupContainer(conf *config.Config, dbcr *kciv1alpha1.Database) (v }, nil } -func mysqlBackupContainer(conf *config.Config, dbcr *kciv1alpha1.Database) (v1.Container, error) { +func mysqlBackupContainer(conf *config.Config, dbcr *kciv1beta1.Database) (v1.Container, error) { env, err := mysqlEnvVars(dbcr) if err != nil { return v1.Container{}, err @@ -193,7 +193,7 @@ func volumeMounts() []v1.VolumeMount { } } -func volumes(dbcr *kciv1alpha1.Database) []v1.Volume { +func volumes(dbcr *kciv1beta1.Database) []v1.Volume { return []v1.Volume{ { Name: "gcloud-secret", @@ -214,7 +214,7 @@ func volumes(dbcr *kciv1alpha1.Database) []v1.Volume { } } -func postgresEnvVars(conf *config.Config, dbcr *kciv1alpha1.Database) ([]v1.EnvVar, error) { +func postgresEnvVars(conf *config.Config, dbcr *kciv1beta1.Database) ([]v1.EnvVar, error) { instance, err := dbcr.GetInstanceRef() if err != nil { logrus.Errorf("can not build backup environment variables - %s", err) @@ -263,7 +263,7 @@ func postgresEnvVars(conf *config.Config, dbcr *kciv1alpha1.Database) ([]v1.EnvV return envList, nil } -func mysqlEnvVars(dbcr *kciv1alpha1.Database) ([]v1.EnvVar, error) { +func mysqlEnvVars(dbcr *kciv1beta1.Database) ([]v1.EnvVar, error) { instance, err := dbcr.GetInstanceRef() if err != nil { logrus.Errorf("can not build backup environment variables - %s", err) @@ -308,7 +308,7 @@ func mysqlEnvVars(dbcr *kciv1alpha1.Database) ([]v1.EnvVar, error) { }, nil } -func 
getBackupHost(dbcr *kciv1alpha1.Database) (string, error) { +func getBackupHost(dbcr *kciv1beta1.Database) (string, error) { host := "" instance, err := dbcr.GetInstanceRef() diff --git a/controllers/backup/cronjob_test.go b/controllers/backup/cronjob_test.go index 01893f0b..eb1aab0f 100644 --- a/controllers/backup/cronjob_test.go +++ b/controllers/backup/cronjob_test.go @@ -21,7 +21,7 @@ import ( "os" "testing" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/config" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" @@ -31,12 +31,12 @@ import ( func TestGCSBackupCronGsql(t *testing.T) { ownership := []metav1.OwnerReference{} - dbcr := &kciv1alpha1.Database{} + dbcr := &kciv1beta1.Database{} dbcr.Namespace = "TestNS" dbcr.Name = "TestDB" - instance := &kciv1alpha1.DbInstance{} + instance := &kciv1beta1.DbInstance{} instance.Status.Info = map[string]string{"DB_CONN": "TestConnection", "DB_PORT": "1234"} - instance.Spec.Google = &kciv1alpha1.GoogleInstance{InstanceName: "google-instance-1"} + instance.Spec.Google = &kciv1beta1.GoogleInstance{InstanceName: "google-instance-1"} dbcr.Status.InstanceRef = instance dbcr.Spec.Instance = "staging" dbcr.Spec.Backup.Cron = "* * * * *" @@ -67,12 +67,12 @@ func TestGCSBackupCronGsql(t *testing.T) { func TestGCSBackupCronGeneric(t *testing.T) { ownership := []metav1.OwnerReference{} - dbcr := &kciv1alpha1.Database{} + dbcr := &kciv1beta1.Database{} dbcr.Namespace = "TestNS" dbcr.Name = "TestDB" - instance := &kciv1alpha1.DbInstance{} + instance := &kciv1beta1.DbInstance{} instance.Status.Info = map[string]string{"DB_CONN": "TestConnection", "DB_PORT": "1234"} - instance.Spec.Generic = &kciv1alpha1.GenericInstance{BackupHost: "slave.test"} + instance.Spec.Generic = &kciv1beta1.GenericInstance{BackupHost: "slave.test"} dbcr.Status.InstanceRef = instance dbcr.Spec.Instance = "staging" dbcr.Spec.Backup.Cron = "* * * * *" @@ -106,16 +106,16 @@ func TestGCSBackupCronGenericWithOwnerReference(t *testing.T) { ownership := []metav1.OwnerReference{} ownership = append(ownership, metav1.OwnerReference{ APIVersion: "api-version", - Kind: "kind", - Name: "name", - UID: "uid", + Kind: "kind", + Name: "name", + UID: "uid", }) - dbcr := &kciv1alpha1.Database{} + dbcr := &kciv1beta1.Database{} dbcr.Namespace = "TestNS" dbcr.Name = "TestDB" - instance := &kciv1alpha1.DbInstance{} + instance := &kciv1beta1.DbInstance{} instance.Status.Info = map[string]string{"DB_CONN": "TestConnection", "DB_PORT": "1234"} - instance.Spec.Generic = &kciv1alpha1.GenericInstance{BackupHost: "slave.test"} + instance.Spec.Generic = &kciv1beta1.GenericInstance{BackupHost: "slave.test"} dbcr.Status.InstanceRef = instance dbcr.Spec.Instance = "staging" dbcr.Spec.Backup.Cron = "* * * * *" diff --git a/controllers/database_controller.go b/controllers/database_controller.go index fa779a1e..c9bb41aa 100644 --- a/controllers/database_controller.go +++ b/controllers/database_controller.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/go-logr/logr" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/controllers/backup" "github.com/kloeckner-i/db-operator/pkg/config" "github.com/kloeckner-i/db-operator/pkg/utils/database" @@ -90,13 +90,12 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c 
reconcilePeriod := r.Interval * time.Second reconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod} - // Fetch the Database custom resource - dbcr := &kciv1alpha1.Database{} + dbcr := &kciv1beta1.Database{} err := r.Get(ctx, req.NamespacedName, dbcr) if err != nil { if k8serrors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. + // Requested object not found, could have been deleted after reconcile request. // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. // Return and don't requeue return reconcileResult, nil @@ -255,18 +254,18 @@ func (r *DatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { } return ctrl.NewControllerManagedBy(mgr). - For(&kciv1alpha1.Database{}). + For(&kciv1beta1.Database{}). WithEventFilter(eventFilter). Watches(&source.Kind{Type: &corev1.Secret{}}, &secretEventHandler{r.Client}). Complete(r) } -func (r *DatabaseReconciler) initialize(ctx context.Context, dbcr *kciv1alpha1.Database) error { - dbcr.Status = kciv1alpha1.DatabaseStatus{} +func (r *DatabaseReconciler) initialize(ctx context.Context, dbcr *kciv1beta1.Database) error { + dbcr.Status = kciv1beta1.DatabaseStatus{} dbcr.Status.Status = false if dbcr.Spec.Instance != "" { - instance := &kciv1alpha1.DbInstance{} + instance := &kciv1beta1.DbInstance{} key := types.NamespacedName{ Namespace: "", Name: dbcr.Spec.Instance, @@ -288,7 +287,7 @@ func (r *DatabaseReconciler) initialize(ctx context.Context, dbcr *kciv1alpha1.D } // createDatabase secret, actual database using admin secret -func (r *DatabaseReconciler) createDatabase(ctx context.Context, dbcr *kciv1alpha1.Database, ownership []metav1.OwnerReference) error { +func (r *DatabaseReconciler) createDatabase(ctx context.Context, dbcr *kciv1beta1.Database, ownership []metav1.OwnerReference) error { databaseSecret, err := r.getDatabaseSecret(ctx, dbcr) if err != nil { if k8serrors.IsNotFound(err) { @@ -361,7 +360,7 @@ func (r *DatabaseReconciler) createDatabase(ctx context.Context, dbcr *kciv1alph return nil } -func (r *DatabaseReconciler) deleteDatabase(ctx context.Context, dbcr *kciv1alpha1.Database) error { +func (r *DatabaseReconciler) deleteDatabase(ctx context.Context, dbcr *kciv1beta1.Database) error { if dbcr.Spec.DeletionProtected { logrus.Infof("DB: namespace=%s, name=%s is deletion protected. 
will not be deleted in backends", dbcr.Name, dbcr.Namespace) return nil @@ -398,7 +397,7 @@ func (r *DatabaseReconciler) deleteDatabase(ctx context.Context, dbcr *kciv1alph return nil } -func (r *DatabaseReconciler) createInstanceAccessSecret(ctx context.Context, dbcr *kciv1alpha1.Database, ownership []metav1.OwnerReference) error { +func (r *DatabaseReconciler) createInstanceAccessSecret(ctx context.Context, dbcr *kciv1beta1.Database, ownership []metav1.OwnerReference) error { if backend, _ := dbcr.GetBackendType(); backend != "google" { logrus.Debugf("DB: namespace=%s, name=%s %s doesn't need instance access secret skipping...", dbcr.Namespace, dbcr.Name, backend) return nil @@ -452,7 +451,7 @@ func (r *DatabaseReconciler) createInstanceAccessSecret(ctx context.Context, dbc return nil } -func (r *DatabaseReconciler) createProxy(ctx context.Context, dbcr *kciv1alpha1.Database, ownership []metav1.OwnerReference) error { +func (r *DatabaseReconciler) createProxy(ctx context.Context, dbcr *kciv1beta1.Database, ownership []metav1.OwnerReference) error { backend, _ := dbcr.GetBackendType() if backend == "generic" { logrus.Infof("DB: namespace=%s, name=%s %s proxy creation is not yet implemented skipping...", dbcr.Namespace, dbcr.Name, backend) @@ -577,57 +576,18 @@ func (r *DatabaseReconciler) createProxy(ctx context.Context, dbcr *kciv1alpha1. return nil } -func (r *DatabaseReconciler) createTemplatedSecrets(ctx context.Context, dbcr *kciv1alpha1.Database, ownership []metav1.OwnerReference) error { +func (r *DatabaseReconciler) createTemplatedSecrets(ctx context.Context, dbcr *kciv1beta1.Database, ownership []metav1.OwnerReference) error { // First of all the password should be taken from secret because it's not stored anywhere else databaseSecret, err := r.getDatabaseSecret(ctx, dbcr) if err != nil { return err } - // Then parse the secret to get the password - // Connection string is deprecated and will be removed soon. So this switch is temporary. - // Once connection string is removed, the switch and the following if condition are gone - // Connection String doesn't support the cleaning up feature, so the secret with a connection - // string won't be removed after a db resource is removed. - useLegacyConnectionString := false - switch { - case len(dbcr.Spec.ConnectionStringTemplate) > 0 && len(dbcr.Spec.SecretsTemplates) > 0: - logrus.Warnf("DB: namespace=%s, name=%s connectionStringTemplate will be ignored since secretsTemplates is not empty", - dbcr.Namespace, - dbcr.Name, - ) - case len(dbcr.Spec.ConnectionStringTemplate) > 0: - logrus.Warnf("DB: namespace=%s, name=%s connectionStringTemplate is deprecated and will be removed in the near future, consider using secretsTemplates", - dbcr.Namespace, - dbcr.Name, - ) - useLegacyConnectionString = true - default: - logrus.Infof("DB: namespace=%s, name=%s generating secrets", dbcr.Namespace, dbcr.Name) - } - - databaseCred, err := parseTemplatedSecretsData(dbcr, databaseSecret.Data, useLegacyConnectionString) + + databaseCred, err := parseTemplatedSecretsData(dbcr, databaseSecret.Data) if err != nil { return err } - if useLegacyConnectionString { - // Generate the connection string - dbConnectionString, err := generateConnectionString(dbcr, databaseCred) - if err != nil { - return err - } - // Update database-credentials secret. 
- if databaseCred.TemplatedSecrets["CONNECTION_STRING"] == dbConnectionString { - return nil - } - logrus.Debugf("DB: namespace=%s, name=%s updating credentials secret", dbcr.Namespace, dbcr.Name) - newSecret := addConnectionStringToSecret(dbcr, databaseSecret.Data, dbConnectionString) - if err = r.Update(ctx, newSecret, &client.UpdateOptions{}); err != nil { - return err - } - return nil - } - dbSecrets, err := generateTemplatedSecrets(dbcr, databaseCred) if err != nil { return err @@ -646,7 +606,7 @@ func (r *DatabaseReconciler) createTemplatedSecrets(ctx context.Context, dbcr *k return nil } -func (r *DatabaseReconciler) createInfoConfigMap(ctx context.Context, dbcr *kciv1alpha1.Database, ownership []metav1.OwnerReference) error { +func (r *DatabaseReconciler) createInfoConfigMap(ctx context.Context, dbcr *kciv1beta1.Database, ownership []metav1.OwnerReference) error { instance, err := dbcr.GetInstanceRef() if err != nil { return err @@ -680,7 +640,7 @@ func (r *DatabaseReconciler) createInfoConfigMap(ctx context.Context, dbcr *kciv return nil } -func (r *DatabaseReconciler) createBackupJob(ctx context.Context, dbcr *kciv1alpha1.Database, ownership []metav1.OwnerReference) error { +func (r *DatabaseReconciler) createBackupJob(ctx context.Context, dbcr *kciv1beta1.Database, ownership []metav1.OwnerReference) error { if !dbcr.Spec.Backup.Enable { // if not enabled, skip return nil @@ -715,7 +675,7 @@ func (r *DatabaseReconciler) createBackupJob(ctx context.Context, dbcr *kciv1alp return nil } -func (r *DatabaseReconciler) getDatabaseSecret(ctx context.Context, dbcr *kciv1alpha1.Database) (*corev1.Secret, error) { +func (r *DatabaseReconciler) getDatabaseSecret(ctx context.Context, dbcr *kciv1beta1.Database) (*corev1.Secret, error) { secret := &corev1.Secret{} key := types.NamespacedName{ Namespace: dbcr.Namespace, @@ -729,7 +689,7 @@ func (r *DatabaseReconciler) getDatabaseSecret(ctx context.Context, dbcr *kciv1a return secret, nil } -func (r *DatabaseReconciler) annotateDatabaseSecret(ctx context.Context, dbcr *kciv1alpha1.Database, secret *corev1.Secret) error { +func (r *DatabaseReconciler) annotateDatabaseSecret(ctx context.Context, dbcr *kciv1beta1.Database, secret *corev1.Secret) error { annotations := secret.ObjectMeta.GetAnnotations() if len(annotations) == 0 { annotations = make(map[string]string) @@ -740,7 +700,7 @@ func (r *DatabaseReconciler) annotateDatabaseSecret(ctx context.Context, dbcr *k return r.Update(ctx, secret) } -func (r *DatabaseReconciler) getAdminSecret(ctx context.Context, dbcr *kciv1alpha1.Database) (*corev1.Secret, error) { +func (r *DatabaseReconciler) getAdminSecret(ctx context.Context, dbcr *kciv1beta1.Database) (*corev1.Secret, error) { instance, err := dbcr.GetInstanceRef() if err != nil { // failed to get DbInstanceRef this case should not happen @@ -758,7 +718,7 @@ func (r *DatabaseReconciler) getAdminSecret(ctx context.Context, dbcr *kciv1alph return secret, nil } -func (r *DatabaseReconciler) manageError(ctx context.Context, dbcr *kciv1alpha1.Database, issue error, requeue bool) (reconcile.Result, error) { +func (r *DatabaseReconciler) manageError(ctx context.Context, dbcr *kciv1beta1.Database, issue error, requeue bool) (reconcile.Result, error) { dbcr.Status.Status = false logrus.Errorf("DB: namespace=%s, name=%s failed %s - %s", dbcr.Namespace, dbcr.Name, dbcr.Status.Phase, issue) promDBsPhaseError.WithLabelValues(dbcr.Status.Phase).Inc() diff --git a/controllers/database_helper.go b/controllers/database_helper.go index 4573b5fb..c408381f 100644 
--- a/controllers/database_helper.go +++ b/controllers/database_helper.go @@ -22,7 +22,7 @@ import ( "strconv" "text/template" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/utils/database" "github.com/kloeckner-i/db-operator/pkg/utils/kci" "github.com/sirupsen/logrus" @@ -54,7 +54,7 @@ func getBlockedTempatedKeys() []string { return []string{fieldMysqlDB, fieldMysqlPassword, fieldMysqlUser, fieldPostgresDB, fieldPostgresUser, fieldPostgressPassword} } -func determinDatabaseType(dbcr *kciv1alpha1.Database, dbCred database.Credentials) (database.Database, error) { +func determinDatabaseType(dbcr *kciv1beta1.Database, dbCred database.Credentials) (database.Database, error) { instance, err := dbcr.GetInstanceRef() if err != nil { logrus.Errorf("could not get instance ref %s - %s", dbcr.Name, err) @@ -88,7 +88,7 @@ func determinDatabaseType(dbcr *kciv1alpha1.Database, dbCred database.Credential switch engine { case "postgres": - extList := dbcr.Spec.Extensions + extList := dbcr.Spec.Postgres.Extensions db := database.Postgres{ Backend: backend, Host: host, @@ -125,39 +125,30 @@ func determinDatabaseType(dbcr *kciv1alpha1.Database, dbCred database.Credential } } -func parseTemplatedSecretsData(dbcr *kciv1alpha1.Database, data map[string][]byte, useLegacyConnStr bool) (database.Credentials, error) { +func parseTemplatedSecretsData(dbcr *kciv1beta1.Database, data map[string][]byte) (database.Credentials, error) { cred, err := parseDatabaseSecretData(dbcr, data) if err != nil { return cred, err } cred.TemplatedSecrets = map[string]string{} - - if useLegacyConnStr { - if connectionString, ok := data["CONNECTION_STRING"]; ok { - cred.TemplatedSecrets["CONNECTION_STRING"] = string(connectionString) + for key := range dbcr.Spec.SecretsTemplates { + // Here we can see if there are obsolete entries in the secret data + if secret, ok := data[key]; ok { + delete(data, key) + cred.TemplatedSecrets[key] = string(secret) } else { - logrus.Infof("DB: namespace=%s, name=%s CONNECTION_STRING key does not exist in the secret data", dbcr.Namespace, dbcr.Name) - } - } else { - for key := range dbcr.Spec.SecretsTemplates { - // Here we can see if there are obsolete entries in the secret data - if secret, ok := data[key]; ok { - delete(data, key) - cred.TemplatedSecrets[key] = string(secret) - } else { - logrus.Infof("DB: namespace=%s, name=%s %s key does not exist in secret data", - dbcr.Namespace, - dbcr.Name, - key, - ) - } + logrus.Infof("DB: namespace=%s, name=%s %s key does not exist in secret data", + dbcr.Namespace, + dbcr.Name, + key, + ) } } return cred, nil } -func parseDatabaseSecretData(dbcr *kciv1alpha1.Database, data map[string][]byte) (database.Credentials, error) { +func parseDatabaseSecretData(dbcr *kciv1beta1.Database, data map[string][]byte) (database.Credentials, error) { cred := database.Credentials{} engine, err := dbcr.GetEngineType() if err != nil { @@ -210,7 +201,7 @@ func parseDatabaseSecretData(dbcr *kciv1alpha1.Database, data map[string][]byte) } } -func generateDatabaseSecretData(dbcr *kciv1alpha1.Database) (map[string][]byte, error) { +func generateDatabaseSecretData(dbcr *kciv1beta1.Database) (map[string][]byte, error) { const ( // https://dev.mysql.com/doc/refman/5.7/en/identifier-length.html mysqlDBNameLengthLimit = 63 @@ -246,63 +237,7 @@ func generateDatabaseSecretData(dbcr *kciv1alpha1.Database) (map[string][]byte, } } -func generateConnectionString(dbcr 
*kciv1alpha1.Database, databaseCred database.Credentials) (connString string, err error) { - // The string that's going to be generated if the default template is used: - // "postgresql://user:password@host:port/database" - const defaultTemplate = "{{ .Protocol }}://{{ .UserName }}:{{ .Password }}@{{ .DatabaseHost }}:{{ .DatabasePort }}/{{ .DatabaseName }}" - - dbData := SecretsTemplatesFields{ - DatabaseHost: dbcr.Status.ProxyStatus.ServiceName, - DatabasePort: dbcr.Status.ProxyStatus.SQLPort, - UserName: databaseCred.Username, - Password: databaseCred.Password, - DatabaseName: databaseCred.Name, - } - - // If proxy is not used, set a real database address - if !dbcr.Status.ProxyStatus.Status { - db, err := determinDatabaseType(dbcr, databaseCred) - if err != nil { - return "", err - } - dbAddress := db.GetDatabaseAddress() - dbData.DatabaseHost = dbAddress.Host - dbData.DatabasePort = int32(dbAddress.Port) - } - - // If engine is 'postgres', the protocol should be postgresql - if dbcr.Status.InstanceRef.Spec.Engine == "postgres" { - dbData.Protocol = "postgresql" - } else { - dbData.Protocol = dbcr.Status.InstanceRef.Spec.Engine - } - - // If dbcr.Spec.ConnectionString is not specified, use the defalt template - var tmpl string - if dbcr.Spec.ConnectionStringTemplate != "" { - tmpl = dbcr.Spec.ConnectionStringTemplate - } else { - tmpl = defaultTemplate - } - - t, err := template.New("connection_string").Parse(tmpl) - if err != nil { - logrus.Error(err) - return - } - - var connStringBytes bytes.Buffer - err = t.Execute(&connStringBytes, dbData) - if err != nil { - logrus.Error(err) - return - } - - connString = connStringBytes.String() - return -} - -func generateTemplatedSecrets(dbcr *kciv1alpha1.Database, databaseCred database.Credentials) (secrets map[string]string, err error) { +func generateTemplatedSecrets(dbcr *kciv1beta1.Database, databaseCred database.Credentials) (secrets map[string]string, err error) { secrets = map[string]string{} templates := map[string]string{} if len(dbcr.Spec.SecretsTemplates) > 0 { @@ -357,7 +292,7 @@ func generateTemplatedSecrets(dbcr *kciv1alpha1.Database, databaseCred database. 
return secrets, nil } -func fillTemplatedSecretData(dbcr *kciv1alpha1.Database, secretData map[string][]byte, newSecretFields map[string]string, ownership []metav1.OwnerReference) (newSecret *v1.Secret) { +func fillTemplatedSecretData(dbcr *kciv1beta1.Database, secretData map[string][]byte, newSecretFields map[string]string, ownership []metav1.OwnerReference) (newSecret *v1.Secret) { blockedTempatedKeys := getBlockedTempatedKeys() for key, value := range newSecretFields { if slices.Contains(blockedTempatedKeys, key) { @@ -373,17 +308,12 @@ func fillTemplatedSecretData(dbcr *kciv1alpha1.Database, secretData map[string][ return } -func addConnectionStringToSecret(dbcr *kciv1alpha1.Database, secretData map[string][]byte, connectionString string) *v1.Secret { - secretData["CONNECTION_STRING"] = []byte(connectionString) - return kci.SecretBuilder(dbcr.Spec.SecretName, dbcr.GetNamespace(), secretData, []metav1.OwnerReference{}) -} - -func addTemplatedSecretToSecret(dbcr *kciv1alpha1.Database, secretData map[string][]byte, secretName string, secretValue string, ownership []metav1.OwnerReference) *v1.Secret { +func addTemplatedSecretToSecret(dbcr *kciv1beta1.Database, secretData map[string][]byte, secretName string, secretValue string, ownership []metav1.OwnerReference) *v1.Secret { secretData[secretName] = []byte(secretValue) return kci.SecretBuilder(dbcr.Spec.SecretName, dbcr.GetNamespace(), secretData, ownership) } -func removeObsoleteSecret(dbcr *kciv1alpha1.Database, secretData map[string][]byte, newSecretFields map[string]string, ownership []metav1.OwnerReference) *v1.Secret { +func removeObsoleteSecret(dbcr *kciv1beta1.Database, secretData map[string][]byte, newSecretFields map[string]string, ownership []metav1.OwnerReference) *v1.Secret { blockedTempatedKeys := getBlockedTempatedKeys() for key := range secretData { diff --git a/controllers/database_helper_test.go b/controllers/database_helper_test.go index 0b1f6624..eec49a74 100644 --- a/controllers/database_helper_test.go +++ b/controllers/database_helper_test.go @@ -24,8 +24,6 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ) var testDbcred = database.Credentials{Name: "testdb", Username: "testuser", Password: "password"} @@ -318,131 +316,3 @@ func TestObsoleteFieldsRemoving(t *testing.T) { assert.Equal(t, newSecret.Data, expectedData, "generated connections string is wrong") } - -// Connection string tests should be removed later, when connection string is gone -func TestPsqlDefaultConnectionStringGeneratationWithProxy(t *testing.T) { - instance := newPostgresTestDbInstanceCr() - postgresDbCr := newPostgresTestDbCr(instance) - postgresDbCr.Status.ProxyStatus.Status = true - - c := SecretsTemplatesFields{ - DatabaseHost: "postgres", - DatabasePort: 5432, - UserName: testDbcred.Username, - Password: testDbcred.Password, - DatabaseName: testDbcred.Name, - } - - postgresDbCr.Status.ProxyStatus.SQLPort = c.DatabasePort - postgresDbCr.Status.ProxyStatus.ServiceName = c.DatabaseHost - - protocol := "postgresql" - expectedString := fmt.Sprintf("%s://%s:%s@%s:%d/%s", protocol, c.UserName, c.Password, c.DatabaseHost, c.DatabasePort, c.DatabaseName) - - connString, err := generateConnectionString(postgresDbCr, testDbcred) - if err != nil { - t.Logf("Unexpected error: %s", err) - t.Fail() - } - assert.Equal(t, expectedString, connString, "generated connections string is wrong") -} - -func TestPsqlDefaultConnectionStringGeneratationWithoutProxy(t *testing.T) { - instance := 
newPostgresTestDbInstanceCr() - postgresDbCr := newPostgresTestDbCr(instance) - - c := SecretsTemplatesFields{ - DatabaseHost: "postgres", - DatabasePort: 5432, - UserName: testDbcred.Username, - Password: testDbcred.Password, - DatabaseName: testDbcred.Name, - } - - protocol := "postgresql" - expectedString := fmt.Sprintf("%s://%s:%s@%s:%d/%s", protocol, c.UserName, c.Password, c.DatabaseHost, c.DatabasePort, c.DatabaseName) - - connString, err := generateConnectionString(postgresDbCr, testDbcred) - if err != nil { - t.Logf("Unexpected error: %s", err) - t.Fail() - } - assert.Equal(t, expectedString, connString, "generated connections string is wrong") -} - -func TestMysqlDefaultConnectionStringGeneratationWithoutProxy(t *testing.T) { - mysqlDbCr := newMysqlTestDbCr() - c := SecretsTemplatesFields{ - DatabaseHost: "mysql", - DatabasePort: 3306, - UserName: testDbcred.Username, - Password: testDbcred.Password, - DatabaseName: testDbcred.Name, - } - protocol := "mysql" - expectedString := fmt.Sprintf("%s://%s:%s@%s:%d/%s", protocol, c.UserName, c.Password, c.DatabaseHost, c.DatabasePort, c.DatabaseName) - - connString, err := generateConnectionString(mysqlDbCr, testDbcred) - if err != nil { - t.Logf("Unexpected error: %s", err) - t.Fail() - } - assert.Equal(t, connString, expectedString, "generated connections string is wrong") -} - -func TestAddingConnectionsStringToSecret(t *testing.T) { - instance := newPostgresTestDbInstanceCr() - postgresDbCr := newPostgresTestDbCr(instance) - secretData := map[string][]byte{ - "POSTGRES_DB": []byte("postgres"), - "POSTGRES_USER": []byte("root"), - "POSTGRES_PASSWORD": []byte("qwertyu9"), - } - - connectionString := "it's a dummy connection string" - - secret := addConnectionStringToSecret(postgresDbCr, secretData, connectionString) - secretData["CONNECTION_STRING"] = []byte(connectionString) - if val, ok := secret.Data["CONNECTION_STRING"]; ok { - assert.Equal(t, string(val), connectionString, "connections string in a secret contains unexpected values") - return - } -} - -func TestPsqlCustomConnectionStringGeneratation(t *testing.T) { - instance := newPostgresTestDbInstanceCr() - postgresDbCr := newPostgresTestDbCr(instance) - - prefix := "custom->" - postfix := "<-for_storing_data_you_know" - postgresDbCr.Spec.ConnectionStringTemplate = fmt.Sprintf("%s{{ .Protocol }}://{{ .UserName }}:{{ .Password }}@{{ .DatabaseHost }}:{{ .DatabasePort }}/{{ .DatabaseName }}%s", prefix, postfix) - - c := SecretsTemplatesFields{ - DatabaseHost: "postgres", - DatabasePort: 5432, - UserName: testDbcred.Username, - Password: testDbcred.Password, - DatabaseName: testDbcred.Name, - } - protocol := "postgresql" - expectedString := fmt.Sprintf("%s%s://%s:%s@%s:%d/%s%s", prefix, protocol, c.UserName, c.Password, c.DatabaseHost, c.DatabasePort, c.DatabaseName, postfix) - - connString, err := generateConnectionString(postgresDbCr, testDbcred) - if err != nil { - t.Logf("unexpected error: %s", err) - t.Fail() - } - assert.Equal(t, connString, expectedString, "generated connections string is wrong") -} - -func TestWrongTemplateConnectionStringGeneratation(t *testing.T) { - instance := newPostgresTestDbInstanceCr() - postgresDbCr := newPostgresTestDbCr(instance) - - postgresDbCr.Spec.ConnectionStringTemplate = "{{ .Protocol }}://{{ .User }}:{{ .Password }}@{{ .DatabaseHost }}:{{ .DatabasePort }}/{{ .DatabaseName }}" - - _, err := generateConnectionString(postgresDbCr, testDbcred) - errSubstr := "can't evaluate field User in type controllers.SecretsTemplatesFields" - - 
assert.Contains(t, err.Error(), errSubstr, "the error doesn't contain expected substring") -} diff --git a/controllers/database_secret_handler.go b/controllers/database_secret_handler.go index 3889c3b3..ba06fff5 100644 --- a/controllers/database_secret_handler.go +++ b/controllers/database_secret_handler.go @@ -19,7 +19,7 @@ package controllers import ( "strings" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -92,7 +92,7 @@ func isWatchedNamespace(watchNamespaces []string, ro runtime.Object) bool { } // define object's namespace objectNamespace := "" - database, isDatabase := ro.(*kciv1alpha1.Database) + database, isDatabase := ro.(*kciv1beta1.Database) if isDatabase { objectNamespace = database.Namespace } else { @@ -115,7 +115,7 @@ func isWatchedNamespace(watchNamespaces []string, ro runtime.Object) bool { } func isDatabase(ro runtime.Object) bool { - _, isDatabase := ro.(*kciv1alpha1.Database) + _, isDatabase := ro.(*kciv1beta1.Database) return isDatabase } @@ -129,7 +129,7 @@ func isObjectUpdated(e event.UpdateEvent) bool { return false } // if object kind is a Database check that 'metadata.generation' field ('spec' section) has been changed - _, isDatabase := e.ObjectNew.(*kciv1alpha1.Database) + _, isDatabase := e.ObjectNew.(*kciv1beta1.Database) if isDatabase { return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() } diff --git a/controllers/dbinstance_controller.go b/controllers/dbinstance_controller.go index ffa390e9..69fe8f81 100644 --- a/controllers/dbinstance_controller.go +++ b/controllers/dbinstance_controller.go @@ -22,7 +22,7 @@ import ( "time" "github.com/go-logr/logr" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/config" "github.com/kloeckner-i/db-operator/pkg/utils/database" "github.com/kloeckner-i/db-operator/pkg/utils/dbinstance" @@ -74,7 +74,7 @@ func (r *DbInstanceReconciler) Reconcile(ctx context.Context, req ctrl.Request) reconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod} // Fetch the DbInstance custom resource - dbin := &kciv1alpha1.DbInstance{} + dbin := &kciv1beta1.DbInstance{} err := r.Get(ctx, req.NamespacedName, dbin) if err != nil { if k8serrors.IsNotFound(err) { @@ -147,11 +147,11 @@ func (r *DbInstanceReconciler) Reconcile(ctx context.Context, req ctrl.Request) // SetupWithManager sets up the controller with the Manager. func (r *DbInstanceReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&kciv1alpha1.DbInstance{}). + For(&kciv1beta1.DbInstance{}). 
Complete(r) } -func (r *DbInstanceReconciler) create(ctx context.Context, dbin *kciv1alpha1.DbInstance) error { +func (r *DbInstanceReconciler) create(ctx context.Context, dbin *kciv1beta1.DbInstance) error { secret, err := kci.GetSecretResource(ctx, dbin.Spec.AdminUserSecret.ToKubernetesType()) if err != nil { logrus.Errorf("Instance: name=%s failed to get instance admin user secret %s/%s", dbin.Name, dbin.Spec.AdminUserSecret.Namespace, dbin.Spec.AdminUserSecret.Name) @@ -219,8 +219,8 @@ func (r *DbInstanceReconciler) create(ctx context.Context, dbin *kciv1alpha1.DbI return nil } -func (r *DbInstanceReconciler) broadcast(ctx context.Context, dbin *kciv1alpha1.DbInstance) error { - dbList := &kciv1alpha1.DatabaseList{} +func (r *DbInstanceReconciler) broadcast(ctx context.Context, dbin *kciv1beta1.DbInstance) error { + dbList := &kciv1beta1.DatabaseList{} err := r.List(ctx, dbList) if err != nil { return err @@ -243,7 +243,7 @@ func (r *DbInstanceReconciler) broadcast(ctx context.Context, dbin *kciv1alpha1. return nil } -func (r *DbInstanceReconciler) createProxy(ctx context.Context, dbin *kciv1alpha1.DbInstance, ownership []metav1.OwnerReference) error { +func (r *DbInstanceReconciler) createProxy(ctx context.Context, dbin *kciv1beta1.DbInstance, ownership []metav1.OwnerReference) error { proxyInterface, err := determineProxyTypeForInstance(r.Conf, dbin) if err != nil { if err == ErrNoProxySupport { diff --git a/controllers/helper.go b/controllers/helper.go index 00d566c0..34f87d6c 100644 --- a/controllers/helper.go +++ b/controllers/helper.go @@ -21,19 +21,19 @@ import ( corev1 "k8s.io/api/core/v1" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/utils/kci" crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -func isDBChanged(dbcr *kciv1alpha1.Database, databaseSecret *corev1.Secret) bool { +func isDBChanged(dbcr *kciv1beta1.Database, databaseSecret *corev1.Secret) bool { annotations := dbcr.ObjectMeta.GetAnnotations() return annotations["checksum/spec"] != kci.GenerateChecksum(dbcr.Spec) || annotations["checksum/secret"] != generateChecksumSecretValue(databaseSecret) } -func addDBChecksum(dbcr *kciv1alpha1.Database, databaseSecret *corev1.Secret) { +func addDBChecksum(dbcr *kciv1beta1.Database, databaseSecret *corev1.Secret) { annotations := dbcr.ObjectMeta.GetAnnotations() if len(annotations) == 0 { annotations = make(map[string]string) @@ -51,7 +51,7 @@ func generateChecksumSecretValue(databaseSecret *corev1.Secret) string { return kci.GenerateChecksum(databaseSecret.Data) } -func isDBInstanceSpecChanged(ctx context.Context, dbin *kciv1alpha1.DbInstance) bool { +func isDBInstanceSpecChanged(ctx context.Context, dbin *kciv1beta1.DbInstance) bool { checksums := dbin.Status.Checksums if checksums["spec"] != kci.GenerateChecksum(dbin.Spec) { return true @@ -67,7 +67,7 @@ func isDBInstanceSpecChanged(ctx context.Context, dbin *kciv1alpha1.DbInstance) return false } -func addDBInstanceChecksumStatus(ctx context.Context, dbin *kciv1alpha1.DbInstance) { +func addDBInstanceChecksumStatus(ctx context.Context, dbin *kciv1beta1.DbInstance) { checksums := dbin.Status.Checksums if len(checksums) == 0 { checksums = make(map[string]string) diff --git a/controllers/helper_test.go b/controllers/helper_test.go index b35c02c5..64559b60 100644 --- a/controllers/helper_test.go +++ b/controllers/helper_test.go @@ -22,7 +22,7 @@ import ( "testing" "bou.ke/monkey" - kciv1alpha1 
"github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/test" "github.com/kloeckner-i/db-operator/pkg/utils/kci" "github.com/stretchr/testify/assert" @@ -36,32 +36,32 @@ const ( TestNamespace = "TestNS" ) -func newPostgresTestDbInstanceCr() kciv1alpha1.DbInstance { +func newPostgresTestDbInstanceCr() kciv1beta1.DbInstance { info := make(map[string]string) info["DB_PORT"] = "5432" info["DB_CONN"] = "postgres" - return kciv1alpha1.DbInstance{ - Spec: kciv1alpha1.DbInstanceSpec{ + return kciv1beta1.DbInstance{ + Spec: kciv1beta1.DbInstanceSpec{ Engine: "postgres", - DbInstanceSource: kciv1alpha1.DbInstanceSource{ - Generic: &kciv1alpha1.GenericInstance{ + DbInstanceSource: kciv1beta1.DbInstanceSource{ + Generic: &kciv1beta1.GenericInstance{ Host: test.GetPostgresHost(), Port: test.GetPostgresPort(), }, }, }, - Status: kciv1alpha1.DbInstanceStatus{Info: info}, + Status: kciv1beta1.DbInstanceStatus{Info: info}, } } -func newPostgresTestDbCr(instanceRef kciv1alpha1.DbInstance) *kciv1alpha1.Database { +func newPostgresTestDbCr(instanceRef kciv1beta1.DbInstance) *kciv1beta1.Database { o := metav1.ObjectMeta{Namespace: TestNamespace} - s := kciv1alpha1.DatabaseSpec{SecretName: TestSecretName} + s := kciv1beta1.DatabaseSpec{SecretName: TestSecretName} - db := kciv1alpha1.Database{ + db := kciv1beta1.Database{ ObjectMeta: o, Spec: s, - Status: kciv1alpha1.DatabaseStatus{ + Status: kciv1beta1.DatabaseStatus{ InstanceRef: &instanceRef, }, } @@ -69,29 +69,29 @@ func newPostgresTestDbCr(instanceRef kciv1alpha1.DbInstance) *kciv1alpha1.Databa return &db } -func newMysqlTestDbCr() *kciv1alpha1.Database { +func newMysqlTestDbCr() *kciv1beta1.Database { o := metav1.ObjectMeta{Namespace: "TestNS"} - s := kciv1alpha1.DatabaseSpec{SecretName: "TestSec"} + s := kciv1beta1.DatabaseSpec{SecretName: "TestSec"} info := make(map[string]string) info["DB_PORT"] = "3306" info["DB_CONN"] = "mysql" - db := kciv1alpha1.Database{ + db := kciv1beta1.Database{ ObjectMeta: o, Spec: s, - Status: kciv1alpha1.DatabaseStatus{ - InstanceRef: &kciv1alpha1.DbInstance{ - Spec: kciv1alpha1.DbInstanceSpec{ + Status: kciv1beta1.DatabaseStatus{ + InstanceRef: &kciv1beta1.DbInstance{ + Spec: kciv1beta1.DbInstanceSpec{ Engine: "mysql", - DbInstanceSource: kciv1alpha1.DbInstanceSource{ - Generic: &kciv1alpha1.GenericInstance{ + DbInstanceSource: kciv1beta1.DbInstanceSource{ + Generic: &kciv1beta1.GenericInstance{ Host: test.GetMysqlHost(), Port: test.GetMysqlPort(), }, }, }, - Status: kciv1alpha1.DbInstanceStatus{Info: info}, + Status: kciv1beta1.DbInstanceStatus{Info: info}, }, }, } @@ -162,9 +162,9 @@ func testAdminSecret(namespace, secretName string) (*corev1.Secret, error) { } func TestSpecChanged(t *testing.T) { - dbin := &kciv1alpha1.DbInstance{} - before := kciv1alpha1.DbInstanceSpec{ - AdminUserSecret: kciv1alpha1.NamespacedName{ + dbin := &kciv1beta1.DbInstance{} + before := kciv1beta1.DbInstanceSpec{ + AdminUserSecret: kciv1beta1.NamespacedName{ Namespace: "test", Name: "secret1", }, @@ -177,8 +177,8 @@ func TestSpecChanged(t *testing.T) { nochange := isDBInstanceSpecChanged(ctx, dbin) assert.Equal(t, nochange, false, "expected false") - after := kciv1alpha1.DbInstanceSpec{ - AdminUserSecret: kciv1alpha1.NamespacedName{ + after := kciv1beta1.DbInstanceSpec{ + AdminUserSecret: kciv1beta1.NamespacedName{ Namespace: "test", Name: "secret2", }, @@ -189,10 +189,10 @@ func TestSpecChanged(t *testing.T) { } func TestConfigChanged(t *testing.T) 
{ - dbin := &kciv1alpha1.DbInstance{} - dbin.Spec.Google = &kciv1alpha1.GoogleInstance{ + dbin := &kciv1beta1.DbInstance{} + dbin.Spec.Google = &kciv1beta1.GoogleInstance{ InstanceName: "test", - ConfigmapName: kciv1alpha1.NamespacedName{ + ConfigmapName: kciv1beta1.NamespacedName{ Namespace: "testNS", Name: "test", }, @@ -213,7 +213,7 @@ func TestConfigChanged(t *testing.T) { } func TestAddChecksumStatus(t *testing.T) { - dbin := &kciv1alpha1.DbInstance{} + dbin := &kciv1beta1.DbInstance{} addDBInstanceChecksumStatus(context.Background(), dbin) checksums := dbin.Status.Checksums assert.NotEqual(t, checksums, map[string]string{}, "annotation should have checksum") diff --git a/controllers/proxy_helper.go b/controllers/proxy_helper.go index b2597a85..d59ae5d3 100644 --- a/controllers/proxy_helper.go +++ b/controllers/proxy_helper.go @@ -23,7 +23,7 @@ import ( "strconv" "strings" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/config" "github.com/kloeckner-i/db-operator/pkg/utils/kci" proxy "github.com/kloeckner-i/db-operator/pkg/utils/proxy" @@ -38,7 +38,7 @@ var ( ErrNoProxySupport = errors.New("no proxy supported backend type") ) -func determineProxyTypeForDB(conf *config.Config, dbcr *kciv1alpha1.Database) (proxy.Proxy, error) { +func determineProxyTypeForDB(conf *config.Config, dbcr *kciv1beta1.Database) (proxy.Proxy, error) { logrus.Debugf("DB: namespace=%s, name=%s - determinProxyType", dbcr.Namespace, dbcr.Name) backend, err := dbcr.GetBackendType() if err != nil { @@ -96,7 +96,7 @@ func determineProxyTypeForDB(conf *config.Config, dbcr *kciv1alpha1.Database) (p } } -func determineProxyTypeForInstance(conf *config.Config, dbin *kciv1alpha1.DbInstance) (proxy.Proxy, error) { +func determineProxyTypeForInstance(conf *config.Config, dbin *kciv1beta1.DbInstance) (proxy.Proxy, error) { logrus.Debugf("Instance: name=%s - determinProxyType", dbin.Name) operatorNamespace, err := getOperatorNamespace() if err != nil { diff --git a/controllers/proxy_helper_test.go b/controllers/proxy_helper_test.go index 167cdf80..a009fc98 100644 --- a/controllers/proxy_helper_test.go +++ b/controllers/proxy_helper_test.go @@ -21,40 +21,40 @@ import ( "testing" "bou.ke/monkey" - kciv1alpha1 "github.com/kloeckner-i/db-operator/api/v1alpha1" + kciv1beta1 "github.com/kloeckner-i/db-operator/api/v1beta1" "github.com/kloeckner-i/db-operator/pkg/config" "github.com/kloeckner-i/db-operator/pkg/utils/proxy" "github.com/stretchr/testify/assert" ) -func makeGsqlInstance() kciv1alpha1.DbInstance { +func makeGsqlInstance() kciv1beta1.DbInstance { info := make(map[string]string) info["DB_CONN"] = "test-conn" info["DB_PORT"] = "1234" - dbInstance := kciv1alpha1.DbInstance{ - Spec: kciv1alpha1.DbInstanceSpec{ - DbInstanceSource: kciv1alpha1.DbInstanceSource{ - Google: &kciv1alpha1.GoogleInstance{}, + dbInstance := kciv1beta1.DbInstance{ + Spec: kciv1beta1.DbInstanceSpec{ + DbInstanceSource: kciv1beta1.DbInstanceSource{ + Google: &kciv1beta1.GoogleInstance{}, }, }, - Status: kciv1alpha1.DbInstanceStatus{ + Status: kciv1beta1.DbInstanceStatus{ Info: info, }, } return dbInstance } -func makeGenericInstance() kciv1alpha1.DbInstance { +func makeGenericInstance() kciv1beta1.DbInstance { info := make(map[string]string) info["DB_CONN"] = "test-conn" info["DB_PORT"] = "1234" - dbInstance := kciv1alpha1.DbInstance{ - Spec: kciv1alpha1.DbInstanceSpec{ - DbInstanceSource: kciv1alpha1.DbInstanceSource{ - Generic: 
&kciv1alpha1.GenericInstance{}, + dbInstance := kciv1beta1.DbInstance{ + Spec: kciv1beta1.DbInstanceSpec{ + DbInstanceSource: kciv1beta1.DbInstanceSource{ + Generic: &kciv1beta1.GenericInstance{}, }, }, - Status: kciv1alpha1.DbInstanceStatus{ + Status: kciv1beta1.DbInstanceStatus{ Info: info, }, } diff --git a/docs/creatingdatabases.md b/docs/creatingdatabases.md index ce62f87c..2163dbd0 100644 --- a/docs/creatingdatabases.md +++ b/docs/creatingdatabases.md @@ -33,7 +33,7 @@ For more details about how it works check [here](howitworks.md) Create Database custom resource ```YAML -apiVersion: "kci.rocks/v1alpha1" +apiVersion: "kci.rocks/v1beta1" kind: "Database" metadata: name: "example-db" @@ -48,6 +48,7 @@ spec: CONNECTION_STRING: "jdbc:{{ .Protocol }}://{{ .UserName }}:{{ .Password }}@{{ .DatabaseHost }}:{{ .DatabasePort }}/{{ .DatabaseName }}" PASSWORD_USER: "{{ .Password }}_{{ .UserName }}" ``` + With `secretsTemplates` you can add fields to the database secret that are composed by any string and by any of the following templated values: ```YAML - Protocol: Depending on db engine. Possible values are mysql/postgresql @@ -135,7 +136,7 @@ data: By default ConfigMaps and Secrets are created without an Owner Reference, so they won't be removed if the `Database` resource is removed. If you want it to be deleted too, you need to turn on the cleanup function. ```YAML -apiVersion: "kci.rocks/v1alpha1" +apiVersion: "kci.rocks/v1beta1" kind: "Database" metadata: name: "example-db" @@ -228,7 +229,7 @@ PostgreSQL extensions listed under `spec.extensions` will be enabled by DB Opera DB Operator execute `CREATE EXTENSION IF NOT EXISTS` on the target database. ```YAML -apiVersion: "kci.rocks/v1alpha1" +apiVersion: "kci.rocks/v1beta1" kind: "Database" metadata: name: "example-db" diff --git a/docs/creatinginstances.md b/docs/creatinginstances.md index 9fbf9ace..9e4f6305 100644 --- a/docs/creatinginstances.md +++ b/docs/creatinginstances.md @@ -29,7 +29,7 @@ Or use existing secret created by stable mysql/postgres helm chart. Create **DbInstance** custom resource. ```YAML -apiVersion: kci.rocks/v1alpha1 +apiVersion: kci.rocks/v1beta1 kind: DbInstance metadata: name: example-generic @@ -80,7 +80,7 @@ data: ``` Configure `DbInstance` like below. ```YAML -apiVersion: kci.rocks/v1alpha1 +apiVersion: kci.rocks/v1beta1 kind: DbInstance metadata: name: example-gsql @@ -130,7 +130,7 @@ kubectl create secret generic example-gsql-admin-secret --from-literal=user=